Merge pull request #1 from hashicorp/master

Update packer repo
Etienne Deneuve 2019-10-11 14:34:54 +02:00 committed by GitHub
commit e574e9c4f6
8061 changed files with 1294431 additions and 218681 deletions

.circleci/config.yml (new file, 202 lines)

@ -0,0 +1,202 @@
orbs:
win: circleci/windows@1.0.0
version: 2.1
executors:
golang:
docker:
- image: circleci/golang:1.13
darwin:
macos:
xcode: "9.0"
commands:
install-go-run-tests-unix:
parameters:
GOOS:
type: string
GOVERSION:
type: string
steps:
- checkout
- run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.<< parameters.GOOS >>-amd64.tar.gz | tar -C ~/ -xz
- run: ~/go/bin/go test ./...
install-go-run-tests-windows:
parameters:
GOVERSION:
type: string
steps:
- checkout
- run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.windows-amd64.zip --output ~/go<< parameters.GOVERSION >>.windows-amd64.zip
- run: unzip ~/go<< parameters.GOVERSION >>.windows-amd64.zip -d ~/
- run: ~/go/bin/go test ./...
build-and-persist-packer-binary:
parameters:
GOOS:
type: string
steps:
- checkout
- run: GOOS=<< parameters.GOOS >> go build -ldflags="-s -w" -o ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH) .
- run: zip ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH).zip ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH)
- run: rm ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH)
- persist_to_workspace:
root: .
paths:
- ./pkg/
# Golang CircleCI 2.0 configuration file
#
# Check https://circleci.com/docs/2.0/language-go/ for more details
jobs:
test-linux:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- checkout
- run: make ci
test-darwin:
executor: darwin
working_directory: ~/go/src/github.com/hashicorp/packer
environment:
GO111MODULE: "off"
steps:
- install-go-run-tests-unix:
GOOS: darwin
GOVERSION: "1.13"
test-windows:
executor:
name: win/vs2019
shell: bash.exe
steps:
- install-go-run-tests-windows:
GOVERSION: "1.13"
check-vendor-vs-mod:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
environment:
GO111MODULE: "off"
steps:
- checkout
- run: GO111MODULE=on go run . --help
- run: make check-vendor-vs-mod
check-fmt:
executor: golang
steps:
- checkout
- run: make fmt-check
check-generate:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- checkout
- run: make generate-check
build_linux:
executor: golang
steps:
- build-and-persist-packer-binary:
GOOS: linux
build_windows:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- build-and-persist-packer-binary:
GOOS: windows
build_darwin:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- build-and-persist-packer-binary:
GOOS: darwin
build_freebsd:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- build-and-persist-packer-binary:
GOOS: freebsd
build_solaris:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- build-and-persist-packer-binary:
GOOS: solaris
build_openbsd:
executor: golang
working_directory: /go/src/github.com/hashicorp/packer
steps:
- build-and-persist-packer-binary:
GOOS: openbsd
store_artifacts:
executor: golang
steps:
- attach_workspace:
at: .
- store_artifacts:
path: ./pkg/
destination: /
publish-github-tag-release:
docker:
- image: cibuilds/github:0.10
working_directory: /go/src/github.com/hashicorp/packer
steps:
- attach_workspace:
at: .
- run: |
ghr -prerelease -t ${GITHUB_TOKEN_AZR} -u ${CIRCLE_PROJECT_USERNAME} -r ${CIRCLE_PROJECT_REPONAME} -c ${CIRCLE_SHA1} -delete ${CIRCLE_TAG} ./pkg/
workflows:
version: 2
test:
jobs:
- test-linux
- test-darwin
- test-windows
check-code:
jobs:
- check-vendor-vs-mod
- check-fmt
- check-generate
build_packer_binaries:
jobs:
- build_linux:
filters:
tags:
only: /.*/
- build_darwin:
filters:
tags:
only: /.*/
- build_windows:
filters:
tags:
only: /.*/
- build_freebsd:
filters:
tags:
only: /.*/
- build_openbsd:
filters:
tags:
only: /.*/
- build_solaris:
filters:
tags:
only: /.*/
- store_artifacts:
requires:
- build_linux
- build_darwin
- build_windows
- build_freebsd
- build_openbsd
- build_solaris
- publish-github-tag-release:
requires:
- build_linux
- build_darwin
- build_windows
- build_freebsd
- build_openbsd
- build_solaris
filters:
branches:
ignore: /.*/
tags:
only: nightly

.gitattributes (9 lines changed)

@ -1,2 +1,7 @@
common/test-fixtures/root/* eol=lf
* text=auto
*.go text eol=lf
*.sh text eol=lf
*.json text eol=lf
*.md text eol=lf
*.ps1 text eol=lf
common/test-fixtures/root/* eol=lf

.github/CONTRIBUTING.md (new file, 303 lines)

@ -0,0 +1,303 @@
# Contributing to Packer
**First:** if you're unsure or afraid of _anything_, just ask or submit the
issue or pull request anyway. You won't be yelled at for giving your best
effort. The worst that can happen is that you'll be politely asked to change
something. We appreciate any sort of contributions, and don't want a wall of
rules to get in the way of that.
However, for those individuals who want a bit more guidance on the best way to
contribute to the project, read on. This document will cover what we're looking
for. By addressing all the points we're looking for, it raises the chances we
can quickly merge or address your contributions.
## Issues
### Reporting an Issue
* Make sure you test against the latest released version. It is possible we
already fixed the bug you're experiencing.
* Run the command with debug output enabled via the environment variable
`PACKER_LOG`. For example: `PACKER_LOG=1 packer build template.json`. Take the
_entire_ output and create a [gist](https://gist.github.com) to link to in your
issue (see the example capture command after this list). Packer should strip
sensitive keys from the output, but take a look through just in case.
* Provide a reproducible test case. If a contributor can't reproduce an issue,
then it dramatically lowers the chances it'll get fixed. And in some cases,
the issue will eventually be closed.
* Respond promptly to any questions made by the Packer team to your issue. Stale
issues will be closed.
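As mentioned in the debug-output item above, a simple way to capture the
entire output for a gist (a sketch, assuming your template is named
`template.json`):
```
# capture stdout and stderr to a file while still printing to the terminal
PACKER_LOG=1 packer build template.json 2>&1 | tee packer-debug.log
```
The resulting `packer-debug.log` can then be uploaded as a gist.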
### Issue Lifecycle
1. The issue is reported.
2. The issue is verified and categorized by a Packer collaborator.
Categorization is done via tags. For example, bugs are marked as "bugs" and
simple fixes are marked as "good first issue".
3. Unless it is critical, the issue is left for a period of time (sometimes many
weeks), giving outside contributors a chance to address the issue.
4. The issue is addressed in a pull request or commit. The issue will be
referenced in the commit message so that the code that fixes it is clearly
linked.
5. The issue is closed.
## Setting up Go
If you have never worked with Go before, you will have to install its
runtime in order to build packer.
1. This project always releases from the latest version of golang. [Install go](https://golang.org/doc/install#install)
## Setting up Packer for dev
Once you have Go installed, you can `go get` Packer and run `make` to compile
and test it. These instructions target POSIX-like environments (macOS, Linux,
Cygwin, etc.), so you may need to adjust them for Windows or other shells. The
instructions below are for Go 1.7 or later. A consolidated example session
follows the numbered steps below.
1. Download the Packer source (and its dependencies) by running
`go get github.com/hashicorp/packer`. This will download the Packer source to
`$GOPATH/src/github.com/hashicorp/packer`.
2. When working on Packer, first `cd $GOPATH/src/github.com/hashicorp/packer`
so you can run `make` and easily access other files. Run `make help` to get
information about make targets.
3. Make your changes to the Packer source. You can run `make` in
`$GOPATH/src/github.com/hashicorp/packer` to run tests and build the Packer
binary. Any compilation errors will be shown when the binaries are
rebuilding. If you don't have `make` you can simply run
`go build -o bin/packer .` from the project root.
4. After building Packer successfully, use
`$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
verify your changes work. For instance:
`$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.
5. If everything works well and the tests pass, run `go fmt` on your code before
submitting a pull-request.
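Taken together, a first development session might look like the following
sketch (assuming a Unix-like shell and that `$GOPATH` is set):
```
# download Packer and its dependencies into the GOPATH
go get github.com/hashicorp/packer
cd $GOPATH/src/github.com/hashicorp/packer

# run the tests and build the Packer binary
make

# verify your changes against a template of your own
bin/packer build template.json
```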
### Windows Systems
On Windows systems you need at least the [MinGW Tools](http://www.mingw.org/), e.g. installed via [choco](https://chocolatey.org/):
```
choco install mingw -y
```
This installs the GCC compiler, as well as `mingw32-make`, which can be used
wherever this documentation mentions `make`.
When building with `go`, you also need to add the Windows executable
extension:
```
go build -o bin/packer.exe
```
### Opening a Pull Request
Thank you for contributing! When you are ready to open a pull-request, you will
need to [fork
Packer](https://github.com/hashicorp/packer#fork-destination-box), push your
changes to your fork, and then open a pull-request.
For example, my GitHub username is `cbednarski`, so I would do the following:
```
git checkout -b f-my-feature
# Develop a patch.
git push https://github.com/cbednarski/Packer f-my-feature
```
From there, open your fork in your browser to open a new pull-request.
**Note:** Go infers package names from their file paths. This means `go build`
will break if you `git clone` your fork instead of using `go get` on the main
Packer project.
**Note:** See '[Working with
forks](https://help.github.com/articles/working-with-forks/)' for a better way
to use `git push ...`.
### Pull Request Lifecycle
1. You are welcome to submit your pull request for commentary or review before
it is fully completed. Please prefix the title of your pull request with
"[WIP]" to indicate this. It's also a good idea to include specific questions
or items you'd like feedback on.
2. Once you believe your pull request is ready to be merged, you can remove any
"[WIP]" prefix from the title and a core team member will review.
3. One of Packer's core team members will look over your contribution and
either merge, or provide comments letting you know if there is anything left
to do. We do our best to provide feedback in a timely manner, but it may take
some time for us to respond. We may also have questions that we need answered
about the code, either because something doesn't make sense to us or because
we want to understand your thought process.
4. If we have requested changes, you can either make those changes or, if you
disagree with the suggested changes, we can have a conversation about our
reasoning and agree on a path forward. This may be a multi-step process. Our
view is that pull requests are a chance to collaborate, and we welcome
conversations about how to do things better. It is the contributor's
responsibility to address any changes requested. While reviewers are happy to
give guidance, it is unsustainable for us to perform the coding work necessary
to get a PR into a mergeable state.
5. Once all outstanding comments and checklist items have been addressed, your
contribution will be merged! Merged PRs will be included in the next
Packer release. The core team takes care of updating the
[CHANGELOG.md](../CHANGELOG.md) as they merge.
6. In rare cases, we might decide that a PR should be closed without merging.
We'll make sure to provide clear reasoning when this happens.
### Tips for Working on Packer
#### Getting Your Pull Requests Merged Faster
It is much easier to review pull requests that are:
1. Well-documented: Try to explain in the pull request comments what your
change does, why you have made the change, and provide instructions for how
to produce the new behavior introduced in the pull request. If you can,
provide screen captures or terminal output to show what the changes look
like. This helps the reviewers understand and test the change.
2. Small: Try to only make one change per pull request. If you found two bugs
and want to fix them both, that's _awesome_, but it's still best to submit
the fixes as separate pull requests. This makes it much easier for reviewers
to keep in their heads all of the implications of individual code changes,
and that means the PR takes less effort and energy to merge. In general, the
smaller the pull request, the sooner reviewers will be able to make time to
review it.
3. Passing Tests: Based on how much time we have, we may not review pull
requests which aren't passing our tests. (Look below for advice on how to
run unit tests). If you need help figuring out why tests are failing, please
feel free to ask, but while we're happy to give guidance it is generally
your responsibility to make sure that tests are passing. If your pull request
changes an interface or invalidates an assumption that causes a bunch of
tests to fail, then you need to fix those tests before we can merge your PR.
If we request changes, try to make those changes in a timely manner. Otherwise,
PRs can go stale and be a lot more work for all of us to merge in the future.
Even with everyone making their best effort to be responsive, it can be
time-consuming to get a PR merged. It can be frustrating to deal with
the back-and-forth as we make sure that we understand the changes fully. Please
bear with us, and please know that we appreciate the time and energy you put
into the project.
#### Working on forks
The easiest way to work on a fork is to set it as a remote of the Packer
project. After following the steps in "Setting up Go to work on Packer":
1. Navigate to the code:
`cd $GOPATH/src/github.com/hashicorp/packer`
2. Add the remote by running:
`git remote add <name of remote> <github url of fork>`
For example:
`git remote add mwhooker https://github.com/mwhooker/packer.git`
3. Checkout a feature branch:
`git checkout -b new-feature`
4. Make changes.
5. (Optional) Push your changes to the fork:
`git push -u <name of remote> new-feature`
This way you can push to your fork to create a PR, but the code on disk still
lives in the spot where the go cli tools are expecting to find it.
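Putting those steps together with the example remote above, a typical session
looks like this:
```
cd $GOPATH/src/github.com/hashicorp/packer
# add your fork as a remote (the remote name here matches the example above)
git remote add mwhooker https://github.com/mwhooker/packer.git
# work on a feature branch
git checkout -b new-feature
# ...make and commit changes...
# push the branch to the fork so you can open a PR from it
git push -u mwhooker new-feature
```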
#### Go modules & go vendor
If you are submitting a change that requires new or updated dependencies,
please include them in `go.mod`/`go.sum` and in the `vendor/` folder. This
helps everything get tested properly in CI.
Note that you will need to use [go
mod](https://github.com/golang/go/wiki/Modules) to do this. This step is
recommended but not required.
Use `go get <project>` to add dependencies to the project and `go mod vendor`
to make a vendored copy of dependencies. See [go mod quick
start](https://github.com/golang/go/wiki/Modules#quick-start) for examples.
Please only apply the minimal vendor changes to get your PR to work. Packer
does not attempt to track the latest version for each dependency.
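As a sketch, adding or updating a dependency (the module path below is only a
placeholder) typically looks like this:
```
# record the dependency in go.mod/go.sum
GO111MODULE=on go get github.com/example/somelib

# refresh the vendor/ folder to match go.mod
GO111MODULE=on go mod vendor
```
Commit the resulting `go.mod`, `go.sum`, and `vendor/` changes together with
your code.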
#### Running Unit Tests
You can run tests for individual packages using commands like this:
```
make test TEST=./builder/amazon/...
```
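The Makefile passes `TESTARGS` straight through to `go test`, so you can
narrow this down further, for example to a single test (the test name here is
only an illustration):
```
make test TEST=./builder/amazon/... TESTARGS="-run TestConfigPrepare"
```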
#### Running Acceptance Tests
Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
for various builders. These typically require an API key (AWS, GCE), or
additional software to be installed on your computer (VirtualBox, VMware).
If you're working on a new builder or builder feature and want to verify it is
functioning (and also hasn't broken anything else), we recommend running the
acceptance tests.
**Warning:** The acceptance tests create/destroy/modify _real resources_, which
may incur costs for real money. In the presence of a bug, it is possible that
resources may be left behind, which can cost money even though you were not
using them. We recommend running tests in an account used only for that purpose
so it is easy to see if there are any dangling resources, and so production
resources are not accidentally destroyed or overwritten during testing.
To run the acceptance tests, invoke `make testacc`:
```
make testacc TEST=./builder/amazon/ebs
...
```
The `TEST` variable lets you narrow the scope of the acceptance tests to a
specific package / folder. The `TESTARGS` variable is recommended to filter down
to a specific resource to test, since testing all of them at once can sometimes
take a very long time.
To run only a specific test, use the `-run` argument:
```
make testacc TEST=./builder/amazon/ebs TESTARGS="-run TestBuilderAcc_forceDeleteSnapshot"
```
Acceptance tests typically require other environment variables to be set for
things such as API tokens and keys. Each test should error and tell you which
credentials are missing, so those are not documented here.
#### Debugging Plugins
Each Packer plugin runs in a separate process and communicates with the main
process via RPC over a socket, so attaching a debugger will not work (or will
at least be complicated). However, most of the Packer code is simple and easy
to follow with `PACKER_LOG` turned on. If that doesn't work, adding some extra
debug print-outs once you have homed in on the problem is usually enough.
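For example, to capture maximum detail to a file while reproducing a problem
(a sketch: `PACKER_LOG_PATH` writes the log to the given file, and the
template name is illustrative):
```
# write the full debug log to packer.log while running the build
PACKER_LOG=1 PACKER_LOG_PATH=packer.log packer build template.json

# then search the log for the part you are interested in
grep -n -i "error" packer.log
```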


@ -1,26 +0,0 @@
_Please read these instructions before submitting_
**DELETE THIS TEMPLATE BEFORE SUBMITTING**
_Only use Github issues to report bugs or feature requests, see
https://www.packer.io/community.html_
For example, _Timeouts waiting for SSH/WinRM_ are generally not bugs within packer and are better addressed by the mailing list. Ask on the mailing list if you are unsure.
If you are planning to open a pull-request just open the pull-request instead of making an issue first.
FOR FEATURES:
Describe the feature you want and your use case _clearly_.
FOR BUGS:
Describe the problem and include the following information:
- Packer version from `packer version`
- Host platform
- Debug log output from `PACKER_LOG=1 packer build template.json`.
Please paste this in a gist https://gist.github.com
- The _simplest example template and scripts_ needed to reproduce the bug.
Include these in your gist https://gist.github.com

.github/ISSUE_TEMPLATE/bug_report.md (new file, 40 lines)

@ -0,0 +1,40 @@
---
name: Bug Report
about: You're experiencing an issue with Packer that is different than the documented behavior.
labels: bug
---
When filing a bug, please include the following headings if possible. Any
example text in this template can be deleted.
#### Overview of the Issue
A paragraph or two about the issue you're experiencing.
#### Reproduction Steps
Steps to reproduce this issue
### Packer version
From `packer version`
### Simplified Packer Buildfile
If the file is longer than a few dozen lines, please include the URL to the
[gist](https://gist.github.com/) of the log or use the [Github detailed
format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d)
instead of posting it directly in the issue.
### Operating system and Environment details
OS, Architecture, and any other information you can provide about the
environment.
### Log Fragments and crash.log files
Include appropriate log fragments. If the log is longer than a few dozen lines,
please include the URL to the [gist](https://gist.github.com/) of the log or
use the [Github detailed format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d) instead of posting it directly in the issue.
Set the env var `PACKER_LOG=1` for maximum log detail.


@ -0,0 +1,18 @@
---
name: Feature Request
about: If you have something you think Packer could improve or add support for.
labels: enhancement
---
Please search the existing issues for relevant feature requests, and use the
reaction feature
(https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)
to add upvotes to pre-existing requests.
#### Feature Description
A written overview of the feature.
#### Use Case(s)
Any relevant use-cases that you see.

.github/ISSUE_TEMPLATE/question.md (new file, 15 lines)

@ -0,0 +1,15 @@
---
name: Question
about: If you have a question, please check out our other community resources instead of opening an issue.
labels: question
---
Issues on GitHub are intended to be related to bugs or feature requests, so we
recommend using our other community resources instead of asking here if you
have a question.
- Packer Guides: https://www.packer.io/guides/index.html
- Discussion List: https://groups.google.com/group/packer-tool
- Any other questions can be sent to the packer section of the HashiCorp
forum: https://discuss.hashicorp.com/c/packer
- Packer community links: https://www.packer.io/community.html


@ -0,0 +1,24 @@
---
name: SSH or WinRM times out
about: I have a waiting SSH or WinRM error.
labels: communicator-question
---
Got one of the following errors? See if the related guides can help.
* `Waiting for WinRM to become available`?
  - See our basic WinRM Packer guide: https://www.packer.io/guides/automatic-operating-system-installs/autounattend_windows.html
* `Waiting for SSH to become available`?
  - See our basic SSH Packer guide: https://www.packer.io/guides/automatic-operating-system-installs/preseed_ubuntu.html
Issues on GitHub are intended to be related to bugs or feature requests, so we recommend using our other community resources instead of asking here if you have a question.
- Packer Guides: https://www.packer.io/guides/index.html
- Discussion List: https://groups.google.com/group/packer-tool
- Any other questions can be sent to the packer section of the HashiCorp
forum: https://discuss.hashicorp.com/c/packer
- Packer community links: https://www.packer.io/community.html


@ -1,5 +1,11 @@
**DELETE THIS TEMPLATE BEFORE SUBMITTING**
In order to have a good experience with our community, we recommend that you
read the contributing guidelines for making a PR, and understand the lifecycle
of a Packer PR:
https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md#opening-an-pull-request
Describe the change you are making here!
Please include tests. Check out these examples:

.gitignore (5 lines changed)

@ -4,6 +4,7 @@
/src
/website/.sass-cache
/website/build
/website/tmp
.DS_Store
.vagrant
.idea
@ -23,4 +24,6 @@ packer-test*.log
.idea/
*.iml
Thumbs.db
/packer.exe
/packer.exe
.project
cache

.travis.yml

@ -1,20 +1,19 @@
env:
- USER=travis
- USER=travis GO111MODULE=off
os:
- osx
sudo: false
language: go
go:
- 1.7.x
- 1.8.x
- 1.x
install:
- make deps
- 1.12.x
script:
- GOMAXPROCS=2 make ci
- df -h
- travis_wait make ci
branches:
only:

File diff suppressed because it is too large.


@ -2,17 +2,63 @@
# builders
/builder/alicloud/ dongxiao.zzh@alibaba-inc.com
/builder/amazon/ebssurrogate/ @jen20
/builder/amazon/ebsvolume/ @jen20
/builder/azure/ @boumenot
/builder/hyperv/ @taliesins
/builder/lxc/ @ChrisLundquist
/builder/lxd/ @ChrisLundquist
/builder/oneandone/ @jasmingacic
/builder/oracle/ @prydie @owainlewis
/builder/profitbricks/ @jasmingacic
/builder/triton/ @jen20 @sean-
/builder/alicloud/ @chhaj5236
/website/source/docs/builders/alicloud* @chhaj5236
/builder/azure/ @paulmey
/website/source/docs/builders/azure* @paulmey
/builder/hyperv/ @taliesins
/website/source/docs/builders/hyperv* @taliesins
/builder/jdcloud/ @XiaohanLiang @remrain
/website/source/docs/builders/jdcloud* @XiaohanLiang @remrain
/builder/linode/ @displague @ctreatma @stvnjacobs
/website/source/docs/builders/linode* @displague @ctreatma @stvnjacobs
/builder/lxc/ @ChrisLundquist
/website/source/docs/builders/lxc* @ChrisLundquist
/builder/lxd/ @ChrisLundquist
/website/source/docs/builders/lxd* @ChrisLundquist
/builder/oneandone/ @jasmingacic
/website/source/docs/builders/oneandone* @jasmingacic
/builder/oracle/ @prydie @owainlewis
/website/source/docs/builders/oracle* @prydie @owainlewis
/builder/profitbricks/ @jasmingacic
/website/source/docs/builders/profitbricks* @jasmingacic
/builder/triton/ @sean-
/website/source/docs/builders/triton* @sean-
/builder/ncloud/ @YuSungDuk
/website/source/docs/builders/ncloud* @YuSungDuk
/builder/proxmox/ @carlpett
/website/source/docs/builders/proxmox* @carlpett
/builder/scaleway/ @sieben @mvaude @jqueuniet @fflorens @brmzkw
/website/source/docs/builders/scaleway* @sieben @mvaude @jqueuniet @fflorens @brmzkw
/builder/hcloud/ @LKaemmerling
/website/source/docs/builders/hcloud* @LKaemmerling
/builder/hyperone/ @m110 @gregorybrzeski @ad-m
/website/source/docs/builders/hyperone* @m110 @gregorybrzeski @ad-m
/builder/ucloud/ @shawnmssu
/website/source/docs/builders/ucloud* @shawnmssu
/builder/yandex/ @GennadySpb @alexanderKhaustov @seukyaso
/website/source/docs/builders/yandex* @GennadySpb @alexanderKhaustov @seukyaso
/builder/osc/ @marinsalinas
/website/source/docs/builders/osc* @marinsalinas
# provisioners
@ -20,7 +66,9 @@
/provisioner/converge/ @stevendborrelli
# post-processors
/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
/post-processor/checksum/ v.tolstov@selfip.ru
/post-processor/exoscale-import/ @falzm @mcorbin
/post-processor/googlecompute-export/ crunkleton@google.com
/post-processor/vsphere-template/ nelson@bennu.cl


@ -1,164 +0,0 @@
# Contributing to Packer
**First:** if you're unsure or afraid of _anything_, just ask
or submit the issue or pull request anyways. You won't be yelled at for
giving your best effort. The worst that can happen is that you'll be
politely asked to change something. We appreciate any sort of contributions,
and don't want a wall of rules to get in the way of that.
However, for those individuals who want a bit more guidance on the
best way to contribute to the project, read on. This document will cover
what we're looking for. By addressing all the points we're looking for,
it raises the chances we can quickly merge or address your contributions.
## Issues
### Reporting an Issue
* Make sure you test against the latest released version. It is possible
we already fixed the bug you're experiencing.
* Run the command with debug ouput with the environment variable
`PACKER_LOG`. For example: `PACKER_LOG=1 packer build template.json`. Take
the *entire* output and create a [gist](https://gist.github.com) for linking
to in your issue. Packer should strip sensitive keys from the output,
but take a look through just in case.
* Provide a reproducible test case. If a contributor can't reproduce an
issue, then it dramatically lowers the chances it'll get fixed. And in
some cases, the issue will eventually be closed.
* Respond promptly to any questions made by the Packer team to your issue.
Stale issues will be closed.
### Issue Lifecycle
1. The issue is reported.
2. The issue is verified and categorized by a Packer collaborator.
Categorization is done via tags. For example, bugs are marked as "bugs"
and easy fixes are marked as "easy".
3. Unless it is critical, the issue is left for a period of time (sometimes
many weeks), giving outside contributors a chance to address the issue.
4. The issue is addressed in a pull request or commit. The issue will be
referenced in the commit message so that the code that fixes it is clearly
linked.
5. The issue is closed.
## Setting up Go to work on Packer
If you have never worked with Go before, you will have to complete the
following steps in order to be able to compile and test Packer. These instructions target POSIX-like environments (Mac OS X, Linux, Cygwin, etc.) so you may need to adjust them for Windows or other shells.
1. [Download](https://golang.org/dl) and install Go. The instructions below
are for go 1.7. Earlier versions of Go are no longer supported.
2. Set and export the `GOPATH` environment variable and update your `PATH`. For
example, you can add to your `.bash_profile`.
```
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
```
3. Download the Packer source (and its dependencies) by running `go get
github.com/hashicorp/packer`. This will download the Packer source to
`$GOPATH/src/github.com/hashicorp/packer`.
4. When working on packer `cd $GOPATH/src/github.com/hashicorp/packer` so you
can run `make` and easily access other files. Run `make help` to get
information about make targets.
5. Make your changes to the Packer source. You can run `make` in
`$GOPATH/src/github.com/hashicorp/packer` to run tests and build the packer
binary. Any compilation errors will be shown when the binaries are
rebuilding. If you don't have `make` you can simply run `go build -o bin/packer .` from the project root.
6. After running building packer successfully, use
`$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
verify your changes work. For instance: `$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.
7. If everything works well and the tests pass, run `go fmt` on your code
before submitting a pull-request.
### Opening an Pull Request
When you are ready to open a pull-request, you will need to [fork packer](https://github.com/hashicorp/packer#fork-destination-box), push your changes to your fork, and then open a pull-request.
For example, my github username is `cbednarski` so I would do the following:
git checkout -b f-my-feature
// develop a patch
git push https://github.com/cbednarski/packer f-my-feature
From there, open your fork in your browser to open a new pull-request.
**Note** Go infers package names from their filepaths. This means `go build` will break if you `git clone` your fork instead of using `go get` on the main packer project.
### Tips for Working on Packer
#### Working on forks
The easiest way to work on a fork is to set it as a remote of the packer project. After following the steps in "Setting up Go to work on Packer":
1. Navigate to $GOPATH/src/github.com/hashicorp/packer
2. Add the remote `git remote add <name of remote> <github url of fork>`. For example `git remote add mwhooker https://github.com/mwhooker/packer.git`.
3. Checkout a feature branch: `git checkout -b new-feature`
4. Make changes
5. (Optional) Push your changes to the fork: `git push -u <name of remote> new-feature`
This way you can push to your fork to create a PR, but the code on disk still lives in the spot where the go cli tools are expecting to find it.
#### Govendor
If you are submitting a change that requires new or updated dependencies, please include them in `vendor/vendor.json` and in the `vendor/` folder. This helps everything get tested properly in CI.
Note that you will need to use [govendor](https://github.com/kardianos/govendor) to do this. This step is recommended but not required; if you don't use govendor please indicate in your PR which dependencies have changed and to what versions.
Use `govendor fetch <project>` to add dependencies to the project. See
[govendor quick
start](https://github.com/kardianos/govendor#quick-start-also-see-the-faq) for
examples.
Please only apply the minimal vendor changes to get your PR to work. Packer does not attempt to track the latest version for each dependency.
#### Running Unit Tests
You can run tests for individual packages using commands like this:
$ make test TEST=./builder/amazon/...
#### Running Acceptance Tests
Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
for various builders. These typically require an API key (AWS, GCE), or
additional software to be installed on your computer (VirtualBox, VMware).
If you're working on a new builder or builder feature and want verify it is functioning (and also hasn't broken anything else), we recommend running the
acceptance tests.
**Warning:** The acceptance tests create/destroy/modify *real resources*, which
may incur costs for real money. In the presence of a bug, it is possible that resources may be left behind, which can cost money even though you were not using them. We recommend running tests in an account used only for that purpose so it is easy to see if there are any dangling resources, and so production resources are not accidentally destroyed or overwritten during testing.
To run the acceptance tests, invoke `make testacc`:
$ make testacc TEST=./builder/amazon/ebs
...
The `TEST` variable lets you narrow the scope of the acceptance tests to a
specific package / folder. The `TESTARGS` variable is recommended to filter
down to a specific resource to test, since testing all of them at once can
sometimes take a very long time.
To run only a specific test, use the `-run` argument:
```
make testacc TEST=./builder/amazon/ebs TESTARGS="-run TestBuilderAcc_forceDeleteSnapshot"
```
Acceptance tests typically require other environment variables to be set for
things such as API tokens and keys. Each test should error and tell you which
credentials are missing, so those are not documented here.

Dockerfile (new file, 30 lines)

@ -0,0 +1,30 @@
FROM ubuntu:16.04
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y \
locales \
openssh-server \
sudo
RUN locale-gen en_US.UTF-8
RUN if ! getent passwd vagrant; then useradd -d /home/vagrant -m -s /bin/bash vagrant; fi \
&& echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \
&& mkdir -p /etc/sudoers.d \
&& echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers.d/vagrant \
&& chmod 0440 /etc/sudoers.d/vagrant
RUN mkdir -p /home/vagrant/.ssh \
&& chmod 0700 /home/vagrant/.ssh \
&& wget --no-check-certificate \
https://raw.github.com/hashicorp/vagrant/master/keys/vagrant.pub \
-O /home/vagrant/.ssh/authorized_keys \
&& chmod 0600 /home/vagrant/.ssh/authorized_keys \
&& chown -R vagrant /home/vagrant/.ssh
RUN mkdir -p /run/sshd
CMD /usr/sbin/sshd -D \
-o UseDNS=no \
-o PidFile=/tmp/sshd.pid

Makefile (116 lines changed)

@ -1,14 +1,15 @@
TEST?=$(shell go list ./... | grep -v vendor)
VET?=$(shell ls -d */ | grep -v vendor | grep -v website)
TEST?=$(shell go list ./...)
VET?=$(shell go list ./...)
# Get the current full sha from git
GITSHA:=$(shell git rev-parse HEAD)
# Get the current local branch name from git (if we can, this may be blank)
GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null)
GOFMT_FILES?=$$(find . -not -path "./vendor/*" -name "*.go")
GOOS=$(shell go env GOOS)
GOARCH=$(shell go env GOARCH)
GOPATH=$(shell go env GOPATH)
EXECUTABLE_FILES=$(shell find . -type f -executable | egrep -v '^\./(website/[vendor|tmp]|vendor/|\.git|bin/|scripts/|pkg/)' | egrep -v '.*(\.sh|\.bats|\.git)' | egrep -v './provisioner/(ansible|inspec)/test-fixtures/exit1')
# Get the git commit
GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_COMMIT=$(shell git rev-parse --short HEAD)
@ -17,35 +18,43 @@ GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)
export GOLDFLAGS
default: deps generate test dev
.PHONY: bin checkversion ci default install-build-deps install-gen-deps fmt fmt-docs fmt-examples generate releasebin test testacc testrace
ci: deps test
default: install-build-deps install-gen-deps generate testrace dev releasebin package dev fmt fmt-check mode-check fmt-docs fmt-examples
release: deps test releasebin package ## Build a release build
ci: testrace ## Test in continuous integration
bin: deps ## Build debug/test build
@go get github.com/mitchellh/gox
release: install-build-deps test releasebin package ## Build a release build
bin: install-build-deps ## Build debug/test build
@echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds."
@sh -c "$(CURDIR)/scripts/build.sh"
@GO111MODULE=auto sh -c "$(CURDIR)/scripts/build.sh"
releasebin: deps
@go get github.com/mitchellh/gox
releasebin: install-build-deps
@grep 'const VersionPrerelease = "dev"' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
echo "ERROR: You must remove prerelease tags from version/version.go prior to release."; \
exit 1; \
fi
@sh -c "$(CURDIR)/scripts/build.sh"
@GO111MODULE=auto sh -c "$(CURDIR)/scripts/build.sh"
package:
$(if $(VERSION),,@echo 'VERSION= needed to release; Use make package skip compilation'; exit 1)
@sh -c "$(CURDIR)/scripts/dist.sh $(VERSION)"
deps:
@go get golang.org/x/tools/cmd/stringer
@go get github.com/kardianos/govendor
@govendor sync
install-build-deps: ## Install dependencies for bin build
@go get github.com/mitchellh/gox
dev: deps ## Build and install a development build
install-gen-deps: ## Install dependencies for code generation
# to avoid having to tidy our go deps, we `go get` our binaries from a temp
# dir. `go get` will change our deps and the following deps are not part of
# out code dependencies; so a go mod tidy will remove them again. `go
# install` seems to install the last tagged version and we want to install
# master.
@(cd $(TEMPDIR) && GO111MODULE=on go get github.com/mna/pigeon@master)
@(cd $(TEMPDIR) && GO111MODULE=on go get github.com/alvaroloes/enumer@master)
@go install ./cmd/struct-markdown
dev: ## Build and install a development build
@grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
echo "ERROR: You must add prerelease tags to version/version.go prior to making a dev build."; \
exit 1; \
@ -57,11 +66,25 @@ dev: deps ## Build and install a development build
@cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH)
fmt: ## Format Go code
@gofmt -w -s $(GOFMT_FILES)
@go fmt ./...
fmt-check: ## Check go code formatting
$(CURDIR)/scripts/gofmtcheck.sh $(GOFMT_FILES)
fmt-check: fmt ## Check go code formatting
@echo "==> Checking that code complies with go fmt requirements..."
@git diff --exit-code; if [ $$? -eq 1 ]; then \
echo "Found files that are not fmt'ed."; \
echo "You can use the command: \`make fmt\` to reformat code."; \
exit 1; \
fi
mode-check: ## Check that only certain files are executable
@echo "==> Checking that only certain files are executable..."
@if [ ! -z "$(EXECUTABLE_FILES)" ]; then \
echo "These files should not be executable or they must be white listed in the Makefile:"; \
echo "$(EXECUTABLE_FILES)" | xargs -n1; \
exit 1; \
else \
echo "Check passed."; \
fi
fmt-docs:
@find ./website/source/docs -name "*.md" -exec pandoc --wrap auto --columns 79 --atx-headers -s -f "markdown_github+yaml_metadata_block" -t "markdown_github+yaml_metadata_block" {} -o {} \;
@ -71,29 +94,44 @@ fmt-examples:
# generate runs `go generate` to build the dynamically generated
# source files.
generate: deps ## Generate dynamically generated code
go generate .
gofmt -w command/plugin.go
generate: install-gen-deps ## Generate dynamically generated code
@echo "==> removing autogenerated markdown..."
@find website/source/ -type f | xargs grep -l '^<!-- Code generated' | xargs rm
go generate ./...
go fmt common/bootcommand/boot_command.go
go fmt command/plugin.go
test: deps fmt-check ## Run unit tests
@go test $(TEST) $(TESTARGS) -timeout=2m
@go tool vet $(VET) ; if [ $$? -eq 1 ]; then \
generate-check: generate ## Check go code generation is on par
@echo "==> Checking that auto-generated code is not changed..."
@git diff --exit-code; if [ $$? -eq 1 ]; then \
echo "Found diffs in go generated code."; \
echo "You can use the command: \`make generate\` to reformat code."; \
exit 1; \
fi
test: mode-check vet ## Run unit tests
@go test $(TEST) $(TESTARGS) -timeout=3m
# testacc runs acceptance tests
testacc: install-build-deps generate ## Run acceptance tests
@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m
testrace: mode-check vet ## Test with race detection enabled
@GO111MODULE=off go test -race $(TEST) $(TESTARGS) -timeout=3m -p=8
check-vendor-vs-mod: ## Check that go modules and vendored code are on par
@GO111MODULE=on go mod vendor
@git diff --exit-code --ignore-space-change --ignore-space-at-eol -- vendor ; if [ $$? -eq 1 ]; then \
echo "ERROR: vendor dir is not on par with go modules definition." && \
exit 1; \
fi
vet: ## Vet Go code
@go vet $(VET) ; if [ $$? -eq 1 ]; then \
echo "ERROR: Vet found problems in the code."; \
exit 1; \
fi
# testacc runs acceptance tests
testacc: deps generate ## Run acceptance tests
@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m
testrace: deps ## Test for race conditions
@go test -race $(TEST) $(TESTARGS) -timeout=2m
updatedeps:
@echo "INFO: Packer deps are managed by govendor. See CONTRIBUTING.md"
help:
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
.PHONY: bin checkversion ci default deps fmt fmt-docs fmt-examples generate releasebin test testacc testrace updatedeps

README.md

@ -9,10 +9,10 @@
[travis]: https://travis-ci.org/hashicorp/packer
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
[godoc-badge]: https://godoc.org/github.com/mitchellh/packer?status.svg
[godoc]: https://godoc.org/github.com/mitchellh/packer
[report-badge]: https://goreportcard.com/badge/github.com/mitchellh/packer
[report]: https://goreportcard.com/report/github.com/mitchellh/packer
[godoc-badge]: https://godoc.org/github.com/hashicorp/packer?status.svg
[godoc]: https://godoc.org/github.com/hashicorp/packer
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
[report]: https://goreportcard.com/report/github.com/hashicorp/packer
* Website: https://www.packer.io
* IRC: `#packer-tool` on Freenode
@ -23,24 +23,8 @@ from a single source configuration.
Packer is lightweight, runs on every major operating system, and is highly
performant, creating machine images for multiple platforms in parallel. Packer
comes out of the box with support for the following platforms:
* Amazon EC2 (AMI). Both EBS-backed and instance-store AMIs
* Azure
* CloudStack
* DigitalOcean
* Docker
* Google Compute Engine
* Hyper-V
* 1&1
* OpenStack
* Oracle Cloud Infrastructure
* Parallels
* ProfitBricks
* QEMU. Both KVM and Xen images.
* Triton (Joyent Public Cloud)
* VMware
* VirtualBox
comes out of the box with support for many platforms, the full list of which can
be found at https://www.packer.io/docs/builders/index.html.
Support for other platforms can be added via plugins.
@ -48,10 +32,6 @@ The images that Packer creates can easily be turned into
[Vagrant](http://www.vagrantup.com) boxes.
## Quick Start
Download and install packages and dependencies
```
go get github.com/hashicorp/packer
```
**Note:** There is a great
[introduction and getting started guide](https://www.packer.io/intro)
@ -59,8 +39,10 @@ for those with a bit more patience. Otherwise, the quick start below
will get you up and running quickly, at the sacrifice of not explaining some
key points.
First, [download a pre-built Packer binary](https://www.packer.io/downloads.html)
for your operating system or [compile Packer yourself](CONTRIBUTING.md#setting-up-go-to-work-on-packer).
First, [download a pre-built Packer
binary](https://www.packer.io/downloads.html) for your operating system or
[compile Packer
yourself](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md#setting-up-go-to-work-on-packer).
After Packer is installed, create your first template, which tells Packer
what platforms to build images for and how you want to build them. In our
@ -98,7 +80,7 @@ Packer will build an AMI according to the "quick-start" template. The AMI
will be available in your AWS account. To delete the AMI, you must manually
delete it using the [AWS console](https://console.aws.amazon.com/). Packer
builds your images, it does not manage their lifecycle. Where they go, how
they're run, etc. is up to you.
they're run, etc., is up to you.
## Documentation
@ -108,4 +90,7 @@ https://www.packer.io/docs
## Developing Packer
See [CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/CONTRIBUTING.md) for best practices and instructions on setting up your development environment to work on Packer.
See
[CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
for best practices and instructions on setting up your development environment
to work on Packer.

Vagrantfile (11 lines changed)

@ -5,6 +5,10 @@ LINUX_BASE_BOX = "bento/ubuntu-16.04"
FREEBSD_BASE_BOX = "jen20/FreeBSD-12.0-CURRENT"
Vagrant.configure(2) do |config|
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.scope = :box
end
# Compilation and development boxes
config.vm.define "linux", autostart: true, primary: true do |vmCfg|
vmCfg.vm.box = LINUX_BASE_BOX
@ -69,9 +73,10 @@ def configureProviders(vmCfg, cpus: "2", memory: "2048")
end
end
vmCfg.vm.provider "virtualbox" do |v|
v.memory = memory
v.cpus = cpus
vmCfg.vm.provider "docker" do |d, override|
d.build_dir = "."
d.has_ssh = true
override.vm.box = nil
end
return vmCfg

appveyor.yml

@ -16,21 +16,18 @@ environment:
clone_folder: c:\gopath\src\github.com\hashicorp\packer
install:
- set GO15VENDOREXPERIMENT=1
- set GO111MODULE=off
- echo %Path%
- go version
- go env
- go get github.com/mitchellh/gox
- go get golang.org/x/tools/cmd/stringer
build_script:
- git rev-parse HEAD
# go test $(go list ./... | grep -v vendor)
- ps: |
go.exe test (go.exe list ./... `
go.exe test -timeout=2m (go.exe list ./... `
|? { -not $_.Contains('/vendor/') } `
|? { $_ -ne 'github.com/hashicorp/packer/builder/parallels/common' } `
|? { $_ -ne 'github.com/hashicorp/packer/common' }`
|? { $_ -ne 'github.com/hashicorp/packer/provisioner/ansible' })
test: off


@ -1,18 +0,0 @@
PACKER=$GOPATH/src/github.com/mitchellh/packer
AZURE=/tmp/packer-azure
ls $AZURE >/dev/null || git clone https://github.com/Azure/packer-azure /tmp/packer-azure
PWD=`pwd`
cd $AZURE && git pull
cd $PWD
# copy things
cp -r $AZURE/packer/builder/azure $PACKER/builder/
cp -r $AZURE/packer/communicator/* $PACKER/communicator/
cp -r $AZURE/packer/provisioner/azureVmCustomScriptExtension $PACKER/provisioner/
# remove legacy API client
rm -rf $PACKER/builder/azure/smapi
# fix imports
find $PACKER/builder/azure/ -type f | grep ".go" | xargs sed -e 's/Azure\/packer-azure\/packer\/builder\/azure/mitchellh\/packer\/builder\/azure/g' -i ''

background_check.go (new file, 22 lines)

@ -0,0 +1,22 @@
// +build !openbsd
package main
import (
"fmt"
"github.com/shirou/gopsutil/process"
)
func checkProcess(currentPID int) (bool, error) {
myProc, err := process.NewProcess(int32(currentPID))
if err != nil {
return false, fmt.Errorf("Process check error: %s", err)
}
bg, err := myProc.Background()
if err != nil {
return bg, fmt.Errorf("Process background check error: %s", err)
}
return bg, nil
}


@ -0,0 +1,10 @@
package main
import (
"fmt"
)
func checkProcess(currentPID int) (bool, error) {
return false, fmt.Errorf("cannot determine if process is backgrounded in " +
"openbsd")
}


@ -1,39 +1,61 @@
//go:generate struct-markdown
package ecs
import (
"fmt"
"os"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/template/interpolate"
"github.com/hashicorp/packer/version"
)
// Config of alicloud
type AlicloudAccessConfig struct {
AlicloudAccessKey string `mapstructure:"access_key"`
AlicloudSecretKey string `mapstructure:"secret_key"`
AlicloudRegion string `mapstructure:"region"`
AlicloudSkipValidation bool `mapstructure:"skip_region_validation"`
SecurityToken string `mapstructure:"security_token"`
// This is the Alicloud access key. It must be provided, but it can also be
// sourced from the ALICLOUD_ACCESS_KEY environment variable.
AlicloudAccessKey string `mapstructure:"access_key" required:"true"`
// This is the Alicloud secret key. It must be provided, but it can also be
// sourced from the ALICLOUD_SECRET_KEY environment variable.
AlicloudSecretKey string `mapstructure:"secret_key" required:"true"`
// This is the Alicloud region. It must be provided, but it can also be
// sourced from the ALICLOUD_REGION environment variables.
AlicloudRegion string `mapstructure:"region" required:"true"`
// The region validation can be skipped if this value is true, the default
// value is false.
AlicloudSkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
// STS access token, can be set through template or by exporting as
// environment variable such as `export SECURITY_TOKEN=value`.
SecurityToken string `mapstructure:"security_token" required:"false"`
client *ClientWrapper
}
const Packer = "HashiCorp-Packer"
const DefaultRequestReadTimeout = 10 * time.Second
// Client for AlicloudClient
func (c *AlicloudAccessConfig) Client() (*ecs.Client, error) {
if err := c.loadAndValidate(); err != nil {
return nil, err
func (c *AlicloudAccessConfig) Client() (*ClientWrapper, error) {
if c.client != nil {
return c.client, nil
}
if c.SecurityToken == "" {
c.SecurityToken = os.Getenv("SECURITY_TOKEN")
}
client := ecs.NewECSClientWithSecurityToken(c.AlicloudAccessKey, c.AlicloudSecretKey,
c.SecurityToken, common.Region(c.AlicloudRegion))
client.SetBusinessInfo("Packer")
if _, err := client.DescribeRegions(); err != nil {
client, err := ecs.NewClientWithStsToken(c.AlicloudRegion, c.AlicloudAccessKey,
c.AlicloudSecretKey, c.SecurityToken)
if err != nil {
return nil, err
}
return client, nil
client.AppendUserAgent(Packer, version.FormattedVersion())
client.SetReadTimeout(DefaultRequestReadTimeout)
c.client = &ClientWrapper{client}
return c.client, nil
}
func (c *AlicloudAccessConfig) Prepare(ctx *interpolate.Context) []error {
@ -42,10 +64,12 @@ func (c *AlicloudAccessConfig) Prepare(ctx *interpolate.Context) []error {
errs = append(errs, err)
}
if c.AlicloudRegion != "" && !c.AlicloudSkipValidation {
if c.validateRegion() != nil {
errs = append(errs, fmt.Errorf("Unknown alicloud region: %s", c.AlicloudRegion))
}
if c.AlicloudRegion == "" {
c.AlicloudRegion = os.Getenv("ALICLOUD_REGION")
}
if c.AlicloudRegion == "" {
errs = append(errs, fmt.Errorf("region option or ALICLOUD_REGION must be provided in template file or environment variables."))
}
if len(errs) > 0 {
@ -69,21 +93,38 @@ func (c *AlicloudAccessConfig) Config() error {
}
func (c *AlicloudAccessConfig) loadAndValidate() error {
if err := c.validateRegion(); err != nil {
func (c *AlicloudAccessConfig) ValidateRegion(region string) error {
supportedRegions, err := c.getSupportedRegions()
if err != nil {
return err
}
return nil
}
func (c *AlicloudAccessConfig) validateRegion() error {
for _, valid := range common.ValidRegions {
if c.AlicloudRegion == string(valid) {
for _, supportedRegion := range supportedRegions {
if region == supportedRegion {
return nil
}
}
return fmt.Errorf("Not a valid alicloud region: %s", c.AlicloudRegion)
return fmt.Errorf("Not a valid alicloud region: %s", region)
}
func (c *AlicloudAccessConfig) getSupportedRegions() ([]string, error) {
client, err := c.Client()
if err != nil {
return nil, err
}
regionsRequest := ecs.CreateDescribeRegionsRequest()
regionsResponse, err := client.DescribeRegions(regionsRequest)
if err != nil {
return nil, err
}
validRegions := make([]string, len(regionsResponse.Regions.Region))
for _, valid := range regionsResponse.Regions.Region {
validRegions = append(validRegions, valid.RegionId)
}
return validRegions, nil
}


@ -1,6 +1,7 @@
package ecs
import (
"os"
"testing"
)
@ -14,14 +15,10 @@ func testAlicloudAccessConfig() *AlicloudAccessConfig {
func TestAlicloudAccessConfigPrepareRegion(t *testing.T) {
c := testAlicloudAccessConfig()
c.AlicloudRegion = ""
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.AlicloudRegion = "cn-beijing-3"
c.AlicloudRegion = ""
if err := c.Prepare(nil); err == nil {
t.Fatal("should have error")
t.Fatalf("should have err")
}
c.AlicloudRegion = "cn-beijing"
@ -29,16 +26,11 @@ func TestAlicloudAccessConfigPrepareRegion(t *testing.T) {
t.Fatalf("shouldn't have err: %s", err)
}
c.AlicloudRegion = "unknown"
if err := c.Prepare(nil); err == nil {
t.Fatalf("should have err")
}
c.AlicloudRegion = "unknown"
c.AlicloudSkipValidation = true
os.Setenv("ALICLOUD_REGION", "cn-hangzhou")
c.AlicloudRegion = ""
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.AlicloudSkipValidation = false
c.AlicloudSkipValidation = false
}


@ -6,8 +6,7 @@ import (
"sort"
"strings"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/packer"
)
@ -19,7 +18,7 @@ type Artifact struct {
BuilderIdValue string
// Alcloud connection for performing API stuff.
Client *ecs.Client
Client *ClientWrapper
}
func (a *Artifact) BuilderId() string {
@ -64,53 +63,73 @@ func (a *Artifact) State(name string) interface{} {
func (a *Artifact) Destroy() error {
errors := make([]error, 0)
for region, imageId := range a.AlicloudImages {
log.Printf("Delete alicloud image ID (%s) from region (%s)", imageId, region)
// Get alicloud image metadata
images, _, err := a.Client.DescribeImages(&ecs.DescribeImagesArgs{
RegionId: common.Region(region),
ImageId: imageId})
copyingImages := make(map[string]string, len(a.AlicloudImages))
sourceImage := make(map[string]*ecs.Image, 1)
for regionId, imageId := range a.AlicloudImages {
describeImagesRequest := ecs.CreateDescribeImagesRequest()
describeImagesRequest.RegionId = regionId
describeImagesRequest.ImageId = imageId
describeImagesRequest.Status = ImageStatusQueried
imagesResponse, err := a.Client.DescribeImages(describeImagesRequest)
if err != nil {
errors = append(errors, err)
}
images := imagesResponse.Images.Image
if len(images) == 0 {
err := fmt.Errorf("Error retrieving details for alicloud image(%s), no alicloud images found", imageId)
errors = append(errors, err)
continue
}
//Unshared the shared account before destroy
sharePermissions, err := a.Client.DescribeImageSharePermission(&ecs.ModifyImageSharePermissionArgs{RegionId: common.Region(region), ImageId: imageId})
if err != nil {
if images[0].IsCopied && images[0].Status != ImageStatusAvailable {
copyingImages[regionId] = imageId
} else {
sourceImage[regionId] = &images[0]
}
}
for regionId, imageId := range copyingImages {
log.Printf("Cancel copying alicloud image (%s) from region (%s)", imageId, regionId)
errs := a.unsharedAccountsOnImages(regionId, imageId)
if errs != nil {
errors = append(errors, errs...)
}
cancelImageCopyRequest := ecs.CreateCancelCopyImageRequest()
cancelImageCopyRequest.RegionId = regionId
cancelImageCopyRequest.ImageId = imageId
if _, err := a.Client.CancelCopyImage(cancelImageCopyRequest); err != nil {
errors = append(errors, err)
}
accountsNumber := len(sharePermissions.Accounts.Account)
if accountsNumber > 0 {
accounts := make([]string, accountsNumber)
for index, account := range sharePermissions.Accounts.Account {
accounts[index] = account.AliyunId
}
err := a.Client.ModifyImageSharePermission(&ecs.ModifyImageSharePermissionArgs{
}
RegionId: common.Region(region),
ImageId: imageId,
RemoveAccount: accounts,
})
for regionId, image := range sourceImage {
imageId := image.ImageId
log.Printf("Delete alicloud image (%s) from region (%s)", imageId, regionId)
errs := a.unsharedAccountsOnImages(regionId, imageId)
if errs != nil {
errors = append(errors, errs...)
}
deleteImageRequest := ecs.CreateDeleteImageRequest()
deleteImageRequest.RegionId = regionId
deleteImageRequest.ImageId = imageId
if _, err := a.Client.DeleteImage(deleteImageRequest); err != nil {
errors = append(errors, err)
}
//Delete the snapshot of this images
for _, diskDevices := range image.DiskDeviceMappings.DiskDeviceMapping {
deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
deleteSnapshotRequest.SnapshotId = diskDevices.SnapshotId
_, err := a.Client.DeleteSnapshot(deleteSnapshotRequest)
if err != nil {
errors = append(errors, err)
}
}
// Delete alicloud images
if err := a.Client.DeleteImage(common.Region(region), imageId); err != nil {
errors = append(errors, err)
}
//Delete the snapshot of this images
for _, diskDevices := range images[0].DiskDeviceMappings.DiskDeviceMapping {
if err := a.Client.DeleteSnapshot(diskDevices.SnapshotId); err != nil {
errors = append(errors, err)
}
}
}
if len(errors) > 0 {
@ -124,6 +143,38 @@ func (a *Artifact) Destroy() error {
return nil
}
func (a *Artifact) unsharedAccountsOnImages(regionId string, imageId string) []error {
var errors []error
describeImageShareRequest := ecs.CreateDescribeImageSharePermissionRequest()
describeImageShareRequest.RegionId = regionId
describeImageShareRequest.ImageId = imageId
imageShareResponse, err := a.Client.DescribeImageSharePermission(describeImageShareRequest)
if err != nil {
errors = append(errors, err)
return errors
}
accountsNumber := len(imageShareResponse.Accounts.Account)
if accountsNumber > 0 {
accounts := make([]string, accountsNumber)
for index, account := range imageShareResponse.Accounts.Account {
accounts[index] = account.AliyunId
}
modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
modifyImageShareRequest.RegionId = regionId
modifyImageShareRequest.ImageId = imageId
modifyImageShareRequest.RemoveAccount = &accounts
_, err := a.Client.ModifyImageSharePermission(modifyImageShareRequest)
if err != nil {
errors = append(errors, err)
}
}
return errors
}
func (a *Artifact) stateAtlasMetadata() interface{} {
metadata := make(map[string]string)
for region, imageId := range a.AlicloudImages {

View File

@ -3,16 +3,15 @@
package ecs
import (
"log"
"context"
"fmt"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"github.com/mitchellh/multistep"
)
// The unique ID for this builder
@ -35,8 +34,6 @@ type Builder struct {
type InstanceNetWork string
const (
ClassicNet = InstanceNetWork("classic")
VpcNet = InstanceNetWork("vpc")
ALICLOUD_DEFAULT_SHORT_TIMEOUT = 180
ALICLOUD_DEFAULT_TIMEOUT = 1800
ALICLOUD_DEFAULT_LONG_TIMEOUT = 3600
@ -57,6 +54,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
return nil, err
}
if b.config.PackerConfig.PackerForce {
b.config.AlicloudImageForceDelete = true
b.config.AlicloudImageForceDeleteSnapshots = true
}
// Accumulate any errors
var errs *packer.MultiError
errs = packer.MultiErrorAppend(errs, b.config.AlicloudAccessConfig.Prepare(&b.config.ctx)...)
@ -67,18 +69,18 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
return nil, errs
}
log.Println(common.ScrubConfig(b.config, b.config.AlicloudAccessKey, b.config.AlicloudSecretKey))
packer.LogSecretFilter.Set(b.config.AlicloudAccessKey, b.config.AlicloudSecretKey)
return nil, nil
}
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
client, err := b.config.Client()
if err != nil {
return nil, err
}
state := new(multistep.BasicStateBag)
state.Put("config", b.config)
state.Put("config", &b.config)
state.Put("client", client)
state.Put("hook", hook)
state.Put("ui", ui)
@ -89,22 +91,19 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
steps = []multistep.Step{
&stepPreValidate{
AlicloudDestImageName: b.config.AlicloudImageName,
ForceDelete: b.config.AlicloudImageForceDetele,
ForceDelete: b.config.AlicloudImageForceDelete,
},
&stepCheckAlicloudSourceImage{
SourceECSImageId: b.config.AlicloudSourceImage,
},
&StepConfigAlicloudKeyPair{
Debug: b.config.PackerDebug,
KeyPairName: b.config.SSHKeyPairName,
PrivateKeyFile: b.config.Comm.SSHPrivateKey,
TemporaryKeyPairName: b.config.TemporaryKeyPairName,
SSHAgentAuth: b.config.Comm.SSHAgentAuth,
DebugKeyPath: fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
RegionId: b.config.AlicloudRegion,
&stepConfigAlicloudKeyPair{
Debug: b.config.PackerDebug,
Comm: &b.config.Comm,
DebugKeyPath: fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
RegionId: b.config.AlicloudRegion,
},
}
if b.chooseNetworkType() == VpcNet {
if b.chooseNetworkType() == InstanceNetworkVpc {
steps = append(steps,
&stepConfigAlicloudVPC{
VpcId: b.config.VpcId,
@ -132,50 +131,69 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
RegionId: b.config.AlicloudRegion,
InternetChargeType: b.config.InternetChargeType,
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
InstnaceName: b.config.InstanceName,
InstanceName: b.config.InstanceName,
ZoneId: b.config.ZoneId,
})
if b.chooseNetworkType() == VpcNet {
steps = append(steps, &setpConfigAlicloudEIP{
if b.chooseNetworkType() == InstanceNetworkVpc {
steps = append(steps, &stepConfigAlicloudEIP{
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
RegionId: b.config.AlicloudRegion,
InternetChargeType: b.config.InternetChargeType,
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
SSHPrivateIp: b.config.SSHPrivateIp,
})
} else {
steps = append(steps, &stepConfigAlicloudPublicIP{
RegionId: b.config.AlicloudRegion,
RegionId: b.config.AlicloudRegion,
SSHPrivateIp: b.config.SSHPrivateIp,
})
}
steps = append(steps,
&stepAttachKeyPar{},
&stepAttachKeyPair{},
&stepRunAlicloudInstance{},
&stepMountAlicloudDisk{},
&communicator.StepConnect{
Config: &b.config.RunConfig.Comm,
Host: SSHHost(
client,
b.config.SSHPrivateIp),
SSHConfig: SSHConfig(
b.config.RunConfig.Comm.SSHAgentAuth,
b.config.RunConfig.Comm.SSHUsername,
b.config.RunConfig.Comm.SSHPassword),
SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
},
&common.StepProvision{},
&common.StepCleanupTempKeys{
Comm: &b.config.RunConfig.Comm,
},
&stepStopAlicloudInstance{
ForceStop: b.config.ForceStopInstance,
ForceStop: b.config.ForceStopInstance,
DisableStop: b.config.DisableStopInstance,
},
&stepDeleteAlicloudImageSnapshots{
AlicloudImageForceDeteleSnapshots: b.config.AlicloudImageForceDeteleSnapshots,
AlicloudImageForceDetele: b.config.AlicloudImageForceDetele,
AlicloudImageForceDeleteSnapshots: b.config.AlicloudImageForceDeleteSnapshots,
AlicloudImageForceDelete: b.config.AlicloudImageForceDelete,
AlicloudImageName: b.config.AlicloudImageName,
AlicloudImageDestinationRegions: b.config.AlicloudImageConfig.AlicloudImageDestinationRegions,
AlicloudImageDestinationNames: b.config.AlicloudImageConfig.AlicloudImageDestinationNames,
})
if b.config.AlicloudImageIgnoreDataDisks {
steps = append(steps, &stepCreateAlicloudSnapshot{
WaitSnapshotReadyTimeout: b.getSnapshotReadyTimeout(),
})
}
steps = append(steps,
&stepCreateAlicloudImage{
AlicloudImageIgnoreDataDisks: b.config.AlicloudImageIgnoreDataDisks,
WaitSnapshotReadyTimeout: b.getSnapshotReadyTimeout(),
},
&stepCreateAlicloudImage{},
&setpRegionCopyAlicloudImage{
&stepCreateTags{
Tags: b.config.AlicloudImageTags,
},
&stepRegionCopyAlicloudImage{
AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions,
AlicloudImageDestinationNames: b.config.AlicloudImageDestinationNames,
RegionId: b.config.AlicloudRegion,
},
&setpShareAlicloudImage{
&stepShareAlicloudImage{
AlicloudImageShareAccounts: b.config.AlicloudImageShareAccounts,
AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
RegionId: b.config.AlicloudRegion,
@ -183,7 +201,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
// Run!
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
b.runner.Run(state)
b.runner.Run(ctx, state)
// If there was an error, return that
if rawErr, ok := state.GetOk("error"); ok {
@ -205,18 +223,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return artifact, nil
}
func (b *Builder) Cancel() {
if b.runner != nil {
log.Println("Cancelling the step runner...")
b.runner.Cancel()
}
}
func (b *Builder) chooseNetworkType() InstanceNetWork {
if b.isVpcNetRequired() {
return VpcNet
return InstanceNetworkVpc
} else {
return ClassicNet
return InstanceNetworkClassic
}
}
@ -231,7 +242,7 @@ func (b *Builder) isVpcSpecified() bool {
func (b *Builder) isUserDataNeeded() bool {
// Public key setup requires userdata
if b.config.RunConfig.Comm.SSHPrivateKey != "" {
if b.config.RunConfig.Comm.SSHPrivateKeyFile != "" {
return true
}
@ -239,5 +250,13 @@ func (b *Builder) isUserDataNeeded() bool {
}
func (b *Builder) isKeyPairNeeded() bool {
return b.config.SSHKeyPairName != "" || b.config.TemporaryKeyPairName != ""
return b.config.Comm.SSHKeyPairName != "" || b.config.Comm.SSHTemporaryKeyPairName != ""
}
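// getSnapshotReadyTimeout returns the configured wait_snapshot_ready_timeout and
// falls back to ALICLOUD_DEFAULT_LONG_TIMEOUT when it is unset.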
func (b *Builder) getSnapshotReadyTimeout() int {
if b.config.WaitSnapshotReadyTimeout > 0 {
return b.config.WaitSnapshotReadyTimeout
}
return ALICLOUD_DEFAULT_LONG_TIMEOUT
}

File diff suppressed because it is too large

View File

@ -1,8 +1,10 @@
package ecs
import (
"reflect"
"testing"
helperconfig "github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
)
@ -93,3 +95,143 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) {
t.Fatal("should have error")
}
}
func TestBuilderPrepare_Devices(t *testing.T) {
var b Builder
config := testBuilderConfig()
config["system_disk_mapping"] = map[string]interface{}{
"disk_category": "cloud",
"disk_description": "system disk",
"disk_name": "system_disk",
"disk_size": 60,
}
config["image_disk_mappings"] = []map[string]interface{}{
{
"disk_category": "cloud_efficiency",
"disk_name": "data_disk1",
"disk_size": 100,
"disk_snapshot_id": "s-1",
"disk_description": "data disk1",
"disk_device": "/dev/xvdb",
"disk_delete_with_instance": false,
},
{
"disk_name": "data_disk2",
"disk_device": "/dev/xvdc",
},
}
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
expected := AlicloudDiskDevice{
DiskCategory: "cloud",
Description: "system disk",
DiskName: "system_disk",
DiskSize: 60,
Encrypted: helperconfig.TriUnset,
}
if !reflect.DeepEqual(b.config.ECSSystemDiskMapping, expected) {
t.Fatalf("system disk is not set properly, actual: %v; expected: %v", b.config.ECSSystemDiskMapping, expected)
}
if !reflect.DeepEqual(b.config.ECSImagesDiskMappings, []AlicloudDiskDevice{
{
DiskCategory: "cloud_efficiency",
DiskName: "data_disk1",
DiskSize: 100,
SnapshotId: "s-1",
Description: "data disk1",
Device: "/dev/xvdb",
DeleteWithInstance: false,
},
{
DiskName: "data_disk2",
Device: "/dev/xvdc",
},
}) {
t.Fatalf("data disks are not set properly, actual: %#v", b.config.ECSImagesDiskMappings)
}
}
func TestBuilderPrepare_IgnoreDataDisks(t *testing.T) {
var b Builder
config := testBuilderConfig()
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if b.config.AlicloudImageIgnoreDataDisks != false {
t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", false, b.config.AlicloudImageIgnoreDataDisks)
}
config["image_ignore_data_disks"] = "false"
warnings, err = b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if b.config.AlicloudImageIgnoreDataDisks != false {
t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", false, b.config.AlicloudImageIgnoreDataDisks)
}
config["image_ignore_data_disks"] = "true"
warnings, err = b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if b.config.AlicloudImageIgnoreDataDisks != true {
t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", true, b.config.AlicloudImageIgnoreDataDisks)
}
}
func TestBuilderPrepare_WaitSnapshotReadyTimeout(t *testing.T) {
var b Builder
config := testBuilderConfig()
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if b.config.WaitSnapshotReadyTimeout != 0 {
t.Fatalf("wait_snapshot_ready_timeout is not set properly, expect: %d, actual: %d", 0, b.config.WaitSnapshotReadyTimeout)
}
if b.getSnapshotReadyTimeout() != ALICLOUD_DEFAULT_LONG_TIMEOUT {
t.Fatalf("default timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_LONG_TIMEOUT, b.getSnapshotReadyTimeout())
}
config["wait_snapshot_ready_timeout"] = ALICLOUD_DEFAULT_TIMEOUT
warnings, err = b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if b.config.WaitSnapshotReadyTimeout != ALICLOUD_DEFAULT_TIMEOUT {
t.Fatalf("wait_snapshot_ready_timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_TIMEOUT, b.config.WaitSnapshotReadyTimeout)
}
if b.getSnapshotReadyTimeout() != ALICLOUD_DEFAULT_TIMEOUT {
t.Fatalf("default timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_TIMEOUT, b.getSnapshotReadyTimeout())
}
}

View File

@ -0,0 +1,310 @@
package ecs
import (
"fmt"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
)
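// ClientWrapper embeds the Alibaba Cloud ECS client and adds the retry and
// wait-for-status helpers used by the builder steps.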
type ClientWrapper struct {
*ecs.Client
}
const (
InstanceStatusRunning = "Running"
InstanceStatusStarting = "Starting"
InstanceStatusStopped = "Stopped"
InstanceStatusStopping = "Stopping"
)
const (
ImageStatusWaiting = "Waiting"
ImageStatusCreating = "Creating"
ImageStatusCreateFailed = "CreateFailed"
ImageStatusAvailable = "Available"
)
var ImageStatusQueried = fmt.Sprintf("%s,%s,%s,%s", ImageStatusWaiting, ImageStatusCreating, ImageStatusCreateFailed, ImageStatusAvailable)
const (
SnapshotStatusAll = "all"
SnapshotStatusProgressing = "progressing"
SnapshotStatusAccomplished = "accomplished"
SnapshotStatusFailed = "failed"
)
const (
DiskStatusInUse = "In_use"
DiskStatusAvailable = "Available"
DiskStatusAttaching = "Attaching"
DiskStatusDetaching = "Detaching"
DiskStatusCreating = "Creating"
DiskStatusReIniting = "ReIniting"
)
const (
VpcStatusPending = "Pending"
VpcStatusAvailable = "Available"
)
const (
VSwitchStatusPending = "Pending"
VSwitchStatusAvailable = "Available"
)
const (
EipStatusAssociating = "Associating"
EipStatusUnassociating = "Unassociating"
EipStatusInUse = "InUse"
EipStatusAvailable = "Available"
)
const (
ImageOwnerSystem = "system"
ImageOwnerSelf = "self"
ImageOwnerOthers = "others"
ImageOwnerMarketplace = "marketplace"
)
const (
IOOptimizedNone = "none"
IOOptimizedOptimized = "optimized"
)
const (
InstanceNetworkClassic = "classic"
InstanceNetworkVpc = "vpc"
)
const (
DiskTypeSystem = "system"
DiskTypeData = "data"
)
const (
TagResourceImage = "image"
TagResourceInstance = "instance"
TagResourceSnapshot = "snapshot"
TagResourceDisk = "disk"
)
const (
IpProtocolAll = "all"
IpProtocolTCP = "tcp"
IpProtocolUDP = "udp"
IpProtocolICMP = "icmp"
IpProtocolGRE = "gre"
)
const (
NicTypeInternet = "internet"
NicTypeIntranet = "intranet"
)
const (
DefaultPortRange = "-1/-1"
DefaultCidrIp = "0.0.0.0/0"
DefaultCidrBlock = "172.16.0.0/24"
)
const (
defaultRetryInterval = 5 * time.Second
defaultRetryTimes = 12
shortRetryTimes = 36
mediumRetryTimes = 360
longRetryTimes = 720
)
type WaitForExpectEvalResult struct {
evalPass bool
stopRetry bool
}
var (
WaitForExpectSuccess = WaitForExpectEvalResult{
evalPass: true,
stopRetry: true,
}
WaitForExpectToRetry = WaitForExpectEvalResult{
evalPass: false,
stopRetry: false,
}
WaitForExpectFailToStop = WaitForExpectEvalResult{
evalPass: false,
stopRetry: true,
}
)
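// WaitForExpectArgs describes a retried request: RequestFunc issues the call,
// EvalFunc decides whether to stop or keep retrying, and the loop is bounded by
// RetryTimeout when it is set, otherwise by RetryTimes.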
type WaitForExpectArgs struct {
RequestFunc func() (responses.AcsResponse, error)
EvalFunc func(response responses.AcsResponse, err error) WaitForExpectEvalResult
RetryInterval time.Duration
RetryTimes int
RetryTimeout time.Duration
}
func (c *ClientWrapper) WaitForExpected(args *WaitForExpectArgs) (responses.AcsResponse, error) {
if args.RetryInterval <= 0 {
args.RetryInterval = defaultRetryInterval
}
if args.RetryTimes <= 0 {
args.RetryTimes = defaultRetryTimes
}
var timeoutPoint time.Time
if args.RetryTimeout > 0 {
timeoutPoint = time.Now().Add(args.RetryTimeout)
}
var lastResponse responses.AcsResponse
var lastError error
for i := 0; ; i++ {
if args.RetryTimeout > 0 && time.Now().After(timeoutPoint) {
break
}
if args.RetryTimeout <= 0 && i >= args.RetryTimes {
break
}
response, err := args.RequestFunc()
lastResponse = response
lastError = err
evalResult := args.EvalFunc(response, err)
if evalResult.evalPass {
return response, nil
}
if evalResult.stopRetry {
return response, err
}
time.Sleep(args.RetryInterval)
}
if lastError == nil {
lastError = fmt.Errorf("<no error>")
}
if args.RetryTimeout > 0 {
return lastResponse, fmt.Errorf("evaluate failed after %d seconds timeout with %d seconds retry interval: %s", int(args.RetryTimeout.Seconds()), int(args.RetryInterval.Seconds()), lastError)
}
return lastResponse, fmt.Errorf("evaluate failed after %d times retry with %d seconds retry interval: %s", args.RetryTimes, int(args.RetryInterval.Seconds()), lastError)
}
func (c *ClientWrapper) WaitForInstanceStatus(regionId string, instanceId string, expectedStatus string) (responses.AcsResponse, error) {
return c.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDescribeInstancesRequest()
request.RegionId = regionId
request.InstanceIds = fmt.Sprintf("[\"%s\"]", instanceId)
return c.DescribeInstances(request)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
instancesResponse := response.(*ecs.DescribeInstancesResponse)
instances := instancesResponse.Instances.Instance
for _, instance := range instances {
if instance.Status == expectedStatus {
return WaitForExpectSuccess
}
}
return WaitForExpectToRetry
},
RetryTimes: mediumRetryTimes,
})
}
func (c *ClientWrapper) WaitForImageStatus(regionId string, imageId string, expectedStatus string, timeout time.Duration) (responses.AcsResponse, error) {
return c.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDescribeImagesRequest()
request.RegionId = regionId
request.ImageId = imageId
request.Status = ImageStatusQueried
return c.DescribeImages(request)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
imagesResponse := response.(*ecs.DescribeImagesResponse)
images := imagesResponse.Images.Image
for _, image := range images {
if image.Status == expectedStatus {
return WaitForExpectSuccess
}
}
return WaitForExpectToRetry
},
RetryTimeout: timeout,
})
}
func (c *ClientWrapper) WaitForSnapshotStatus(regionId string, snapshotId string, expectedStatus string, timeout time.Duration) (responses.AcsResponse, error) {
return c.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDescribeSnapshotsRequest()
request.RegionId = regionId
request.SnapshotIds = fmt.Sprintf("[\"%s\"]", snapshotId)
return c.DescribeSnapshots(request)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
snapshotsResponse := response.(*ecs.DescribeSnapshotsResponse)
snapshots := snapshotsResponse.Snapshots.Snapshot
for _, snapshot := range snapshots {
if snapshot.Status == expectedStatus {
return WaitForExpectSuccess
}
}
return WaitForExpectToRetry
},
RetryTimeout: timeout,
})
}
type EvalErrorType bool
const (
EvalRetryErrorType = EvalErrorType(true)
EvalNotRetryErrorType = EvalErrorType(false)
)
func (c *ClientWrapper) EvalCouldRetryResponse(evalErrors []string, evalErrorType EvalErrorType) func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
return func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err == nil {
return WaitForExpectSuccess
}
e, ok := err.(errors.Error)
if !ok {
return WaitForExpectToRetry
}
if evalErrorType == EvalRetryErrorType && !ContainsInArray(evalErrors, e.ErrorCode()) {
return WaitForExpectFailToStop
}
if evalErrorType == EvalNotRetryErrorType && ContainsInArray(evalErrors, e.ErrorCode()) {
return WaitForExpectFailToStop
}
return WaitForExpectToRetry
}
}
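// The sketch below is illustrative only and not part of this diff: it shows how
// EvalCouldRetryResponse is typically paired with WaitForExpected, retrying a call
// until it succeeds or fails with one of the listed error codes. It assumes the
// imports shown at the top of this file; the region, image id and error code are
// hypothetical examples.
func exampleDeleteImageWithRetry(c *ClientWrapper) error {
	_, err := c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDeleteImageRequest()
			request.RegionId = "cn-beijing" // hypothetical region
			request.ImageId = "m-example"   // hypothetical image id
			return c.DeleteImage(request)
		},
		// With EvalNotRetryErrorType, the listed codes abort the retry loop;
		// any other error is retried until RetryTimes is exhausted.
		EvalFunc:   c.EvalCouldRetryResponse([]string{"InvalidImageId.NotFound"}, EvalNotRetryErrorType),
		RetryTimes: shortRetryTimes,
	})
	return err
}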

View File

@ -0,0 +1,83 @@
package ecs
import (
"fmt"
"testing"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
func TestWaitForExpectedExceedRetryTimes(t *testing.T) {
c := ClientWrapper{}
iter := 0
waitDone := make(chan bool, 1)
go func() {
_, _ = c.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
iter++
return nil, fmt.Errorf("test: let iteration %d failed", iter)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
fmt.Printf("need retry: %s\n", err)
return WaitForExpectToRetry
}
return WaitForExpectSuccess
},
})
waitDone <- true
}()
timeTolerance := 1 * time.Second
select {
case <-waitDone:
if iter != defaultRetryTimes {
t.Fatalf("WaitForExpected should terminate at the %d iterations", defaultRetryTimes)
}
case <-time.After(defaultRetryTimes*defaultRetryInterval + timeTolerance):
t.Fatalf("WaitForExpected should terminate within %f seconds", (defaultRetryTimes*defaultRetryInterval + timeTolerance).Seconds())
}
}
func TestWaitForExpectedExceedRetryTimeout(t *testing.T) {
c := ClientWrapper{}
expectTimeout := 10 * time.Second
iter := 0
waitDone := make(chan bool, 1)
go func() {
_, _ = c.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
iter++
return nil, fmt.Errorf("test: let iteration %d failed", iter)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
fmt.Printf("need retry: %s\n", err)
return WaitForExpectToRetry
}
return WaitForExpectSuccess
},
RetryTimeout: expectTimeout,
})
waitDone <- true
}()
timeTolerance := 1 * time.Second
select {
case <-waitDone:
if iter > int(expectTimeout/defaultRetryInterval) {
t.Fatalf("WaitForExpected should terminate before the %d iterations", int(expectTimeout/defaultRetryInterval))
}
case <-time.After(expectTimeout + timeTolerance):
t.Fatalf("WaitForExpected should terminate within %f seconds", (expectTimeout + timeTolerance).Seconds())
}
}

View File

@ -1,41 +1,198 @@
//go:generate struct-markdown
package ecs
import (
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/hashicorp/packer/template/interpolate"
"regexp"
"strings"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/template/interpolate"
)
type AlicloudDiskDevice struct {
DiskName string `mapstructure:"disk_name"`
DiskCategory string `mapstructure:"disk_category"`
DiskSize int `mapstructure:"disk_size"`
SnapshotId string `mapstructure:"disk_snapshot_id"`
Description string `mapstructure:"disk_description"`
DeleteWithInstance bool `mapstructure:"disk_delete_with_instance"`
Device string `mapstructure:"disk_device"`
// The value of disk name is blank by default. [2,
// 128] English or Chinese characters, must begin with an
// uppercase/lowercase letter or Chinese character. Can contain numbers,
// ., _ and -. The disk name will appear on the console. It cannot
// begin with http:// or https://.
DiskName string `mapstructure:"disk_name" required:"false"`
// Category of the system disk. Optional values
// are:
// - cloud - general cloud disk
// - cloud_efficiency - efficiency cloud disk
// - cloud_ssd - cloud SSD
DiskCategory string `mapstructure:"disk_category" required:"false"`
// Size of the system disk, measured in GiB. Value
// range: [20, 500]. The specified value must be equal to or greater
// than max{20, ImageSize}. Default value: max{40, ImageSize}.
DiskSize int `mapstructure:"disk_size" required:"false"`
// Snapshots are used to create the data
// disk. After this parameter is specified, Size is ignored. The actual
// size of the created disk is the size of the specified snapshot.
SnapshotId string `mapstructure:"disk_snapshot_id" required:"false"`
// The value of disk description is blank by
// default. [2, 256] characters. The disk description will appear on the
// console. It cannot begin with http:// or https://.
Description string `mapstructure:"disk_description" required:"false"`
// Whether or not the disk is
// released along with the instance:
DeleteWithInstance bool `mapstructure:"disk_delete_with_instance" required:"false"`
// Device information of the related instance:
// such as /dev/xvdb It is null unless the Status is In_use.
Device string `mapstructure:"disk_device" required:"false"`
// Whether or not to encrypt the data disk.
// If this option is set to true, the data disk will be encrypted and the corresponding snapshot in the target image will also be encrypted. By
// default, if this is an extra data disk, Packer will not encrypt the
// data disk. Otherwise, Packer will keep the encryption setting to what
// it was in the source image. Please refer to Introduction of ECS disk encryption
// for more details.
Encrypted config.Trilean `mapstructure:"disk_encrypted" required:"false"`
}
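// An illustrative sketch, not part of this diff: a data disk mapping built from
// the fields documented above. All values are made-up examples.
var exampleDataDisk = AlicloudDiskDevice{
	DiskName:           "data_disk1",
	DiskCategory:       "cloud_efficiency",
	DiskSize:           100,
	Device:             "/dev/xvdb",
	Description:        "data disk1",
	DeleteWithInstance: true,
}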
type AlicloudDiskDevices struct {
ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings"`
// Image disk mapping for system
// disk.
// - `disk_category` (string) - Category of the system disk. Optional values
// are:
// - `cloud` - general cloud disk
// - `cloud_efficiency` - efficiency cloud disk
// - `cloud_ssd` - cloud SSD
//
// For phased-out instance types and non-I/O optimized instances, the
// default value is cloud. Otherwise, the default value is
// cloud\_efficiency.
//
// - `disk_description` (string) - The value of disk description is blank by
// default. \[2, 256\] characters. The disk description will appear on the
// console. It cannot begin with `http://` or `https://`.
//
// - `disk_name` (string) - The value of disk name is blank by default. \[2,
// 128\] English or Chinese characters, must begin with an
// uppercase/lowercase letter or Chinese character. Can contain numbers,
// `.`, `_` and `-`. The disk name will appear on the console. It cannot
// begin with `http://` or `https://`.
//
// - `disk_size` (number) - Size of the system disk, measured in GiB. Value
// range: \[20, 500\]. The specified value must be equal to or greater
// than max{20, ImageSize}. Default value: max{40, ImageSize}.
//
ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false"`
// Add one or more data
// disks to the image.
//
// - `disk_category` (string) - Category of the data disk. Optional values
// are:
// - `cloud` - general cloud disk
// - `cloud_efficiency` - efficiency cloud disk
// - `cloud_ssd` - cloud SSD
//
// Default value: cloud.
//
// - `disk_delete_with_instance` (boolean) - Whether or not the disk is
// released along with the instance:
// - True indicates that when the instance is released, this disk will
// be released with it
// - False indicates that when the instance is released, this disk will
// be retained.
// - `disk_description` (string) - The value of disk description is blank by
// default. \[2, 256\] characters. The disk description will appear on the
// console. It cannot begin with `http://` or `https://`.
//
// - `disk_device` (string) - Device information of the related instance:
// such as `/dev/xvdb` It is null unless the Status is In\_use.
//
// - `disk_name` (string) - The value of disk name is blank by default. \[2,
// 128\] English or Chinese characters, must begin with an
// uppercase/lowercase letter or Chinese character. Can contain numbers,
// `.`, `_` and `-`. The disk name will appear on the console. It cannot
// begin with `http://` or `https://`.
//
// - `disk_size` (number) - Size of the data disk, in GB, values range:
// - `cloud` - 5 \~ 2000
// - `cloud_efficiency` - 20 \~ 2048
// - `cloud_ssd` - 20 \~ 2048
//
// The value should be equal to or greater than the size of the specific
// SnapshotId.
//
// - `disk_snapshot_id` (string) - Snapshots are used to create the data
// disk. After this parameter is specified, Size is ignored. The actual
// size of the created disk is the size of the specified snapshot.
//
// Snapshots from on or before July 15, 2013 cannot be used to create a
// disk.
//
// - `disk_encrypted` (boolean) - Whether or not to encrypt the data disk.
// If this option is set to true, the data disk will be encrypted and the corresponding snapshot in the target image will also be encrypted. By
// default, if this is an extra data disk, Packer will not encrypt the
// data disk. Otherwise, Packer will keep the encryption setting to what
// it was in the source image. Please refer to Introduction of [ECS disk encryption](https://www.alibabacloud.com/help/doc-detail/59643.htm)
// for more details.
//
ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings" required:"false"`
}
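// A minimal sketch, not part of this diff, of how the mappings documented above
// are supplied through the builder configuration keys; it mirrors the fixtures
// in builder_test.go and the values are illustrative only.
var exampleDiskMappings = map[string]interface{}{
	"system_disk_mapping": map[string]interface{}{
		"disk_category": "cloud_ssd",
		"disk_size":     60,
	},
	"image_disk_mappings": []map[string]interface{}{
		{
			"disk_name":   "data_disk1",
			"disk_size":   100,
			"disk_device": "/dev/xvdb",
		},
	},
}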
type AlicloudImageConfig struct {
AlicloudImageName string `mapstructure:"image_name"`
AlicloudImageVersion string `mapstructure:"image_version"`
AlicloudImageDescription string `mapstructure:"image_description"`
AlicloudImageShareAccounts []string `mapstructure:"image_share_account"`
AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions"`
AlicloudImageDestinationNames []string `mapstructure:"image_copy_names"`
AlicloudImageForceDetele bool `mapstructure:"image_force_delete"`
AlicloudImageForceDeteleSnapshots bool `mapstructure:"image_force_delete_snapshots"`
AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation"`
AlicloudDiskDevices `mapstructure:",squash"`
// The name of the user-defined image, [2, 128]
// English or Chinese characters. It must begin with an uppercase/lowercase
// letter or a Chinese character, and may contain numbers, _ or -. It
// cannot begin with http:// or https://.
AlicloudImageName string `mapstructure:"image_name" required:"true"`
// The version number of the image, with a length
// limit of 1 to 40 English characters.
AlicloudImageVersion string `mapstructure:"image_version" required:"false"`
// The description of the image, with a length
// limit of 0 to 256 characters. Leaving it blank means null, which is the
// default value. It cannot begin with http:// or https://.
AlicloudImageDescription string `mapstructure:"image_description" required:"false"`
// The IDs of to-be-added Aliyun
// accounts to which the image is shared. The number of accounts is 1 to 10.
// If the number of accounts is greater than 10, this parameter is ignored.
AlicloudImageShareAccounts []string `mapstructure:"image_share_account" required:"false"`
AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
// The destination regions to which the image will be copied.
AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false"`
// The name of the destination image,
// [2, 128] English or Chinese characters. It must begin with an
// uppercase/lowercase letter or a Chinese character, and may contain numbers,
// _ or -. It cannot begin with http:// or https://.
AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false"`
// Whether or not to encrypt the target images, including those copied if image_copy_regions is specified. If this option
// is set to true, a temporary image will be created from the provisioned
// instance in the main region and an encrypted copy will be generated in the
// same region. By default, Packer will keep the encryption setting to what
// it was in the source image.
ImageEncrypted config.Trilean `mapstructure:"image_encrypted" required:"false"`
// If this value is true and the target image names, including those of the
// copies, duplicate existing images, the existing images will be deleted
// before the target images are created; otherwise, the creation will
// fail. The default value is false. Check `image_name` and
// `image_copy_names` options for names of target images. If
// [-force](https://packer.io/docs/commands/build.html#force) option is
// provided in `build` command, this option can be omitted and taken as
// true.
AlicloudImageForceDelete bool `mapstructure:"image_force_delete" required:"false"`
// If this value is true, when deleting the duplicated existing images, the
// source snapshots of those images will be deleted as well. If
// [-force](https://packer.io/docs/commands/build.html#force) option is
// provided in `build` command, this option can be omitted and taken as
// true.
AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots" required:"false"`
AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
// If this value is true, the image
// created will not include any snapshot of data disks. This option is
// useful when the default data disks bundled with certain instance types
// are not needed. The default value is false.
AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks" required:"false"`
// The region validation can be skipped
// if this value is true. The default value is false.
AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
// Tags applied to the destination
// image and relevant snapshots.
AlicloudImageTags map[string]string `mapstructure:"tags" required:"false"`
AlicloudDiskDevices `mapstructure:",squash"`
}
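// An illustrative sketch, not part of this diff, of the image options documented
// above expressed as builder configuration keys; the image name, region and tag
// values are hypothetical examples.
var exampleImageOptions = map[string]interface{}{
	"image_name":              "packer-example-image",
	"image_description":       "image built by packer",
	"image_copy_regions":      []string{"cn-hangzhou"},
	"image_force_delete":      true,
	"image_ignore_data_disks": false,
	"tags": map[string]string{
		"builder": "packer",
	},
}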
func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
@ -65,15 +222,6 @@ func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
// Mark that we saw the region
regionSet[region] = struct{}{}
if !c.AlicloudImageSkipRegionValidation {
// Verify the region is real
if valid := validateRegion(region); valid != nil {
errs = append(errs, fmt.Errorf("Unknown region: %s", region))
continue
}
}
regions = append(regions, region)
}
@ -86,14 +234,3 @@ func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
return nil
}
func validateRegion(region string) error {
for _, valid := range common.ValidRegions {
if region == string(valid) {
return nil
}
}
return fmt.Errorf("Not a valid alicloud region: %s", region)
}

View File

@ -2,8 +2,6 @@ package ecs
import (
"testing"
"github.com/denverdino/aliyungo/common"
)
func testAlicloudImageConfig() *AlicloudImageConfig {
@ -31,34 +29,33 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
t.Fatalf("shouldn't have err: %s", err)
}
c.AlicloudImageDestinationRegions = regionsToString()
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.AlicloudImageDestinationRegions = []string{"foo"}
if err := c.Prepare(nil); err == nil {
t.Fatal("should have error")
}
c.AlicloudImageDestinationRegions = []string{"cn-beijing", "cn-hangzhou", "eu-central-1"}
if err := c.Prepare(nil); err != nil {
t.Fatalf("bad: %s", err)
}
c.AlicloudImageDestinationRegions = []string{"unknow"}
c.AlicloudImageDestinationRegions = nil
c.AlicloudImageSkipRegionValidation = true
if err := c.Prepare(nil); err != nil {
t.Fatal("shouldn't have error")
}
c.AlicloudImageSkipRegionValidation = false
}
func regionsToString() []string {
var regions []string
for _, region := range common.ValidRegions {
regions = append(regions, string(region))
func TestECSImageConfigPrepare_imageTags(t *testing.T) {
c := testAlicloudImageConfig()
c.AlicloudImageTags = map[string]string{
"TagKey1": "TagValue1",
"TagKey2": "TagValue2",
}
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if len(c.AlicloudImageTags) != 2 || c.AlicloudImageTags["TagKey1"] != "TagValue1" ||
c.AlicloudImageTags["TagKey2"] != "TagValue2" {
t.Fatalf("invalid value, expected: %s, actual: %s", map[string]string{
"TagKey1": "TagValue1",
"TagKey2": "TagValue2",
}, c.AlicloudImageTags)
}
return regions
}

View File

@ -2,12 +2,13 @@ package ecs
import (
"fmt"
"strconv"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
func message(state multistep.StateBag, module string) {
func cleanUpMessage(state multistep.StateBag, module string) {
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
@ -18,5 +19,34 @@ func message(state multistep.StateBag, module string) {
} else {
ui.Say(fmt.Sprintf("Cleaning up '%s'", module))
}
}
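// halt records the error (optionally prefixed) in the state bag, reports it to
// the UI and aborts the current step.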
func halt(state multistep.StateBag, err error, prefix string) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
if prefix != "" {
err = fmt.Errorf("%s: %s", prefix, err)
}
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
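// convertNumber renders a positive integer as a string and returns an empty
// string for non-positive values.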
func convertNumber(value int) string {
if value <= 0 {
return ""
}
return strconv.Itoa(value)
}
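// ContainsInArray reports whether value is present in arr.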
func ContainsInArray(arr []string, value string) bool {
for _, item := range arr {
if item == value {
return true
}
}
return false
}

View File

@ -1,3 +1,5 @@
//go:generate struct-markdown
package ecs
import (
@ -8,42 +10,122 @@ import (
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/template/interpolate"
)
type RunConfig struct {
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
ZoneId string `mapstructure:"zone_id"`
IOOptimized bool `mapstructure:"io_optimized"`
InstanceType string `mapstructure:"instance_type"`
Description string `mapstructure:"description"`
AlicloudSourceImage string `mapstructure:"source_image"`
ForceStopInstance bool `mapstructure:"force_stop_instance"`
SecurityGroupId string `mapstructure:"security_group_id"`
SecurityGroupName string `mapstructure:"security_group_name"`
UserData string `mapstructure:"user_data"`
UserDataFile string `mapstructure:"user_data_file"`
VpcId string `mapstructure:"vpc_id"`
VpcName string `mapstructure:"vpc_name"`
CidrBlock string `mapstructure:"vpc_cidr_block"`
VSwitchId string `mapstructure:"vswitch_id"`
VSwitchName string `mapstructure:"vswitch_id"`
InstanceName string `mapstructure:"instance_name"`
InternetChargeType string `mapstructure:"internet_charge_type"`
InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out"`
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
// ID of the zone to which the disk belongs.
ZoneId string `mapstructure:"zone_id" required:"false"`
// Whether an ECS instance is I/O optimized or not. If this option is not
// provided, the value will be determined by product API according to what
// `instance_type` is used.
IOOptimized config.Trilean `mapstructure:"io_optimized" required:"false"`
// Type of the instance. For values, see [Instance Type
// Table](https://www.alibabacloud.com/help/doc-detail/25378.htm?spm=a3c0i.o25499en.a3.9.14a36ac8iYqKRA).
// You can also obtain the latest instance type table by invoking the
// [Querying Instance Type
// Table](https://intl.aliyun.com/help/doc-detail/25620.htm?spm=a3c0i.o25499en.a3.6.Dr1bik)
// interface.
InstanceType string `mapstructure:"instance_type" required:"true"`
Description string `mapstructure:"description"`
// This is the base image id from which you want to
// create your customized images.
AlicloudSourceImage string `mapstructure:"source_image" required:"true"`
// Whether to force shutdown upon device
// restart. The default value is `false`.
//
// If it is set to `false`, the system is shut down normally; if it is set to
// `true`, the system is forced to shut down.
ForceStopInstance bool `mapstructure:"force_stop_instance" required:"false"`
// If this option is set to true, Packer
// will not stop the instance for you, and you need to make sure the instance
// will be stopped in the final provisioner command. Otherwise, Packer will
// time out while waiting for the instance to be stopped. This option is provided
// for specific scenarios where you want to stop the instance yourself, e.g.,
// running Sysprep on Windows, which may shut down the instance within its command.
// The default value is false.
DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
// ID of the security group to which a newly
// created instance belongs. Mutual access is allowed between instances in one
// security group. If not specified, the newly created instance will be added
// to the default security group. If the default group doesn't exist, or the
// number of instances in it has reached the maximum limit, a new security
// group will be created automatically.
SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
// The security group name. The default value
// is blank. [2, 128] English or Chinese characters, must begin with an
// uppercase/lowercase letter or Chinese character. Can contain numbers, .,
// _ or -. It cannot begin with http:// or https://.
SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
// User data to apply when launching the instance. Note
// that you need to be careful about escaping characters due to the templates
// being JSON. It is often more convenient to use user_data_file instead.
// Packer will not automatically wait for a user script to finish before
// shutting down the instance; this must be handled in a provisioner.
UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
// data when launching the instance.
UserDataFile string `mapstructure:"user_data_file" required:"false"`
// VPC ID allocated by the system.
VpcId string `mapstructure:"vpc_id" required:"false"`
// The VPC name. The default value is blank. [2, 128]
// English or Chinese characters, must begin with an uppercase/lowercase
// letter or Chinese character. Can contain numbers, _ and -. The VPC
// name will appear on the console. Cannot begin with http:// or
// https://.
VpcName string `mapstructure:"vpc_name" required:"false"`
// Value options: 192.168.0.0/16 and
// 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
CidrBlock string `mapstructure:"vpc_cidr_block" required:"false"`
// The ID of the VSwitch to be used.
VSwitchId string `mapstructure:"vswitch_id" required:"false"`
// The name of the VSwitch to be used.
VSwitchName string `mapstructure:"vswitch_name" required:"false"`
// Display name of the instance, which is a string of 2 to 128 Chinese or
// English characters. It must begin with an uppercase/lowercase letter or
// a Chinese character and can contain numerals, `.`, `_`, or `-`. The
// instance name is displayed on the Alibaba Cloud console. If this
// parameter is not specified, the default value is InstanceId of the
// instance. It cannot begin with `http://` or `https://`.
InstanceName string `mapstructure:"instance_name" required:"false"`
// Internet charge type, which can be
// `PayByTraffic` or `PayByBandwidth`. Optional values:
// - `PayByBandwidth`
// - `PayByTraffic`
//
// If this parameter is not specified, the default value is `PayByBandwidth`.
// For regions outside China, currently only `PayByTraffic` is supported and you
// must set it manually.
InternetChargeType string `mapstructure:"internet_charge_type" required:"false"`
// Maximum outgoing bandwidth to the
// public network, measured in Mbps (Mega bits per second).
//
// Value range:
// - `PayByBandwidth`: \[0, 100\]. If this parameter is not specified, API
// automatically sets it to 0 Mbps.
// - `PayByTraffic`: \[1, 100\]. If this parameter is not specified, an
// error is returned.
InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out" required:"false"`
// Timeout of creating snapshot(s).
// The default timeout is 3600 seconds if this option is not set or is set
// to 0. For those disks containing lots of data, it may require a higher
// timeout value.
WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
Comm communicator.Config `mapstructure:",squash"`
// If this value is true, packer will connect to
// the ECS created through private ip instead of allocating a public ip or an
// EIP. The default value is false.
SSHPrivateIp bool `mapstructure:"ssh_private_ip" required:"false"`
}
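// A short illustrative sketch, not part of this diff, of the run options
// documented above expressed as builder configuration keys; the source image id
// and instance type are hypothetical examples.
var exampleRunOptions = map[string]interface{}{
	"source_image":         "ubuntu_18_04_64_20G_alibase_20190624.vhd",
	"instance_type":        "ecs.n1.tiny",
	"internet_charge_type": "PayByTraffic",
	"ssh_private_ip":       false,
}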
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
if c.SSHKeyPairName == "" && c.TemporaryKeyPairName == "" &&
c.Comm.SSHPrivateKey == "" && c.Comm.SSHPassword == "" && c.Comm.WinRMPassword == "" {
if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" && c.Comm.WinRMPassword == "" {
c.TemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
}
// Validation
@ -57,7 +139,7 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
}
if c.InstanceType == "" {
errs = append(errs, errors.New("An aliclod_instance_type must be specified"))
errs = append(errs, errors.New("An alicloud_instance_type must be specified"))
}
if c.UserData != "" && c.UserDataFile != "" {

View File

@ -2,6 +2,7 @@ package ecs
import (
"io/ioutil"
"os"
"testing"
"github.com/hashicorp/packer/helper/communicator"
@ -12,7 +13,9 @@ func testConfig() *RunConfig {
AlicloudSourceImage: "alicloud_images",
InstanceType: "ecs.n1.tiny",
Comm: communicator.Config{
SSHUsername: "alicloud",
SSH: communicator.SSH{
SSHUsername: "alicloud",
},
},
}
}
@ -68,6 +71,7 @@ func TestRunConfigPrepare_UserData(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
defer tf.Close()
c.UserData = "foo"
@ -92,6 +96,7 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
defer tf.Close()
c.UserDataFile = tf.Name()
@ -102,21 +107,72 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) {
func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
c := testConfig()
c.TemporaryKeyPairName = ""
c.Comm.SSHTemporaryKeyPairName = ""
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.TemporaryKeyPairName == "" {
if c.Comm.SSHTemporaryKeyPairName == "" {
t.Fatal("keypair name is empty")
}
c.TemporaryKeyPairName = "ssh-key-123"
c.Comm.SSHTemporaryKeyPairName = "ssh-key-123"
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.TemporaryKeyPairName != "ssh-key-123" {
if c.Comm.SSHTemporaryKeyPairName != "ssh-key-123" {
t.Fatal("keypair name does not match")
}
}
func TestRunConfigPrepare_SSHPrivateIp(t *testing.T) {
c := testConfig()
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.SSHPrivateIp != false {
t.Fatalf("invalid value, expected: %t, actul: %t", false, c.SSHPrivateIp)
}
c.SSHPrivateIp = true
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.SSHPrivateIp != true {
t.Fatalf("invalid value, expected: %t, actul: %t", true, c.SSHPrivateIp)
}
c.SSHPrivateIp = false
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.SSHPrivateIp != false {
t.Fatalf("invalid value, expected: %t, actul: %t", false, c.SSHPrivateIp)
}
}
func TestRunConfigPrepare_DisableStopInstance(t *testing.T) {
c := testConfig()
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.DisableStopInstance != false {
t.Fatalf("invalid value, expected: %t, actul: %t", false, c.DisableStopInstance)
}
c.DisableStopInstance = true
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.DisableStopInstance != true {
t.Fatalf("invalid value, expected: %t, actul: %t", true, c.DisableStopInstance)
}
c.DisableStopInstance = false
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.DisableStopInstance != false {
t.Fatalf("invalid value, expected: %t, actul: %t", false, c.DisableStopInstance)
}
}

View File

@ -1,15 +1,9 @@
package ecs
import (
"fmt"
"net"
"os"
"time"
packerssh "github.com/hashicorp/packer/communicator/ssh"
"github.com/mitchellh/multistep"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"github.com/hashicorp/packer/helper/multistep"
)
var (
@ -27,57 +21,3 @@ func SSHHost(e alicloudSSHHelper, private bool) func(multistep.StateBag) (string
return ipAddress, nil
}
}
// SSHConfig returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using the private key
// or password.
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
if useAgent {
authSock := os.Getenv("SSH_AUTH_SOCK")
if authSock == "" {
return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
}
sshAgent, err := net.Dial("unix", authSock)
if err != nil {
return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
privateKey, hasKey := state.GetOk("privateKey")
if hasKey {
signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string)))
if err != nil {
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
} else {
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.Password(password),
ssh.KeyboardInteractive(
packerssh.PasswordKeyboardInteractive(password)),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
}
}

View File

@ -1,65 +1,69 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"time"
)
type stepAttachKeyPar struct {
type stepAttachKeyPair struct {
}
func (s *stepAttachKeyPar) Run(state multistep.StateBag) multistep.StepAction {
keyPairName := state.Get("keyPair").(string)
var attachKeyPairNotRetryErrors = []string{
"MissingParameter",
"DependencyViolation.WindowsInstance",
"InvalidKeyPairName.NotFound",
"InvalidRegionId.NotFound",
}
func (s *stepAttachKeyPair) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ClientWrapper)
config := state.Get("config").(*Config)
instance := state.Get("instance").(*ecs.Instance)
keyPairName := config.Comm.SSHKeyPairName
if keyPairName == "" {
return multistep.ActionContinue
}
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
timeoutPoint := time.Now().Add(120 * time.Second)
for {
err := client.AttachKeyPair(&ecs.AttachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
if err != nil {
e, _ := err.(*common.Error)
if (!(e.Code == "MissingParameter" || e.Code == "DependencyViolation.WindowsInstance" ||
e.Code == "InvalidKeyPairName.NotFound" || e.Code == "InvalidRegionId.NotFound")) &&
time.Now().Before(timeoutPoint) {
time.Sleep(5 * time.Second)
continue
}
err := fmt.Errorf("Error attaching keypair %s to instance %s : %s",
keyPairName, instance.InstanceId, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
break
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateAttachKeyPairRequest()
request.RegionId = config.AlicloudRegion
request.KeyPairName = keyPairName
request.InstanceIds = "[\"" + instance.InstanceId + "\"]"
return client.AttachKeyPair(request)
},
EvalFunc: client.EvalCouldRetryResponse(attachKeyPairNotRetryErrors, EvalNotRetryErrorType),
})
if err != nil {
return halt(state, err, fmt.Sprintf("Error attaching keypair %s to instance %s", keyPairName, instance.InstanceId))
}
ui.Message(fmt.Sprintf("Attach keypair %s to instance: %s", keyPairName, instance.InstanceId))
return multistep.ActionContinue
}
func (s *stepAttachKeyPar) Cleanup(state multistep.StateBag) {
keyPairName := state.Get("keyPair").(string)
func (s *stepAttachKeyPair) Cleanup(state multistep.StateBag) {
client := state.Get("client").(*ClientWrapper)
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.Instance)
keyPairName := config.Comm.SSHKeyPairName
if keyPairName == "" {
return
}
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
err := client.DetachKeyPair(&ecs.DetachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
detachKeyPairRequest := ecs.CreateDetachKeyPairRequest()
detachKeyPairRequest.RegionId = config.AlicloudRegion
detachKeyPairRequest.KeyPairName = keyPairName
detachKeyPairRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instance.InstanceId)
_, err := client.DetachKeyPair(detachKeyPairRequest)
if err != nil {
err := fmt.Errorf("Error Detaching keypair %s to instance %s : %s", keyPairName,
instance.InstanceId, err)

View File

@ -1,36 +1,48 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepCheckAlicloudSourceImage struct {
SourceECSImageId string
}
func (s *stepCheckAlicloudSourceImage) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
func (s *stepCheckAlicloudSourceImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)
images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{RegionId: common.Region(config.AlicloudRegion),
ImageId: config.AlicloudSourceImage})
describeImagesRequest := ecs.CreateDescribeImagesRequest()
describeImagesRequest.RegionId = config.AlicloudRegion
describeImagesRequest.ImageId = config.AlicloudSourceImage
imagesResponse, err := client.DescribeImages(describeImagesRequest)
if err != nil {
err := fmt.Errorf("Error querying alicloud image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "Error querying alicloud image")
}
images := imagesResponse.Images.Image
// Describe marketplace image
describeImagesRequest.ImageOwnerAlias = "marketplace"
marketImagesResponse, err := client.DescribeImages(describeImagesRequest)
if err != nil {
return halt(state, err, "Error querying alicloud marketplace image")
}
marketImages := marketImagesResponse.Images.Image
if len(marketImages) > 0 {
images = append(images, marketImages...)
}
if len(images) == 0 {
err := fmt.Errorf("No alicloud image was found matching filters: %v", config.AlicloudSourceImage)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "")
}
ui.Message(fmt.Sprintf("Found image ID: %s", images[0].ImageId))

View File

@ -1,79 +1,165 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/hashicorp/packer/common/uuid"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type setpConfigAlicloudEIP struct {
type stepConfigAlicloudEIP struct {
AssociatePublicIpAddress bool
RegionId string
InternetChargeType string
InternetMaxBandwidthOut int
allocatedId string
SSHPrivateIp bool
}
func (s *setpConfigAlicloudEIP) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
var allocateEipAddressRetryErrors = []string{
"LastTokenProcessing",
}
func (s *stepConfigAlicloudEIP) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
ui.Say("Allocating eip")
ipaddress, allocateId, err := client.AllocateEipAddress(&ecs.AllocateEipAddressArgs{
RegionId: common.Region(s.RegionId), InternetChargeType: common.InternetChargeType(s.InternetChargeType),
instance := state.Get("instance").(*ecs.Instance)
if s.SSHPrivateIp {
ipaddress := instance.VpcAttributes.PrivateIpAddress.IpAddress
if len(ipaddress) == 0 {
ui.Say("Failed to get private ip of instance")
return multistep.ActionHalt
}
state.Put("ipaddress", ipaddress[0])
return multistep.ActionContinue
}
ui.Say("Allocating eip...")
allocateEipAddressRequest := s.buildAllocateEipAddressRequest(state)
allocateEipAddressResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.AllocateEipAddress(allocateEipAddressRequest)
},
EvalFunc: client.EvalCouldRetryResponse(allocateEipAddressRetryErrors, EvalRetryErrorType),
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
return multistep.ActionHalt
return halt(state, err, "Error allocating eip")
}
ipaddress := allocateEipAddressResponse.(*ecs.AllocateEipAddressResponse).EipAddress
ui.Message(fmt.Sprintf("Allocated eip: %s", ipaddress))
allocateId := allocateEipAddressResponse.(*ecs.AllocateEipAddressResponse).AllocationId
s.allocatedId = allocateId
if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
return multistep.ActionHalt
err = s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusAvailable)
if err != nil {
return halt(state, err, "Error wait eip available timeout")
}
if err = client.AssociateEipAddress(allocateId, instance.InstanceId); err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error binding eip: %s", err))
return multistep.ActionHalt
associateEipAddressRequest := ecs.CreateAssociateEipAddressRequest()
associateEipAddressRequest.AllocationId = allocateId
associateEipAddressRequest.InstanceId = instance.InstanceId
if _, err := client.AssociateEipAddress(associateEipAddressRequest); err != nil {
e, ok := err.(errors.Error)
if !ok || e.ErrorCode() != "TaskConflict" {
return halt(state, err, "Error associating eip")
}
ui.Error(fmt.Sprintf("Error associate eip: %s", err))
}
if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
ecs.EipStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error associating eip: %s", err))
return multistep.ActionHalt
err = s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusInUse)
if err != nil {
return halt(state, err, "Error wait eip associated timeout")
}
ui.Say(fmt.Sprintf("Allocated eip %s", ipaddress))
state.Put("ipaddress", ipaddress)
return multistep.ActionContinue
}
func (s *setpConfigAlicloudEIP) Cleanup(state multistep.StateBag) {
func (s *stepConfigAlicloudEIP) Cleanup(state multistep.StateBag) {
if len(s.allocatedId) == 0 {
return
}
client := state.Get("client").(*ecs.Client)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
cleanUpMessage(state, "EIP")
client := state.Get("client").(*ClientWrapper)
instance := state.Get("instance").(*ecs.Instance)
ui := state.Get("ui").(packer.Ui)
message(state, "EIP")
if err := client.UnassociateEipAddress(s.allocatedId, instance.InstanceId); err != nil {
ui.Say(fmt.Sprintf("Failed to unassociate eip."))
unassociateEipAddressRequest := ecs.CreateUnassociateEipAddressRequest()
unassociateEipAddressRequest.AllocationId = s.allocatedId
unassociateEipAddressRequest.InstanceId = instance.InstanceId
if _, err := client.UnassociateEipAddress(unassociateEipAddressRequest); err != nil {
ui.Say(fmt.Sprintf("Failed to unassociate eip: %s", err))
}
if err := client.WaitForEip(common.Region(s.RegionId), s.allocatedId, ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
ui.Say(fmt.Sprintf("Timeout while unassociating eip."))
}
if err := client.ReleaseEipAddress(s.allocatedId); err != nil {
ui.Say(fmt.Sprintf("Failed to release eip."))
if err := s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusAvailable); err != nil {
ui.Say(fmt.Sprintf("Timeout while unassociating eip: %s", err))
}
releaseEipAddressRequest := ecs.CreateReleaseEipAddressRequest()
releaseEipAddressRequest.AllocationId = s.allocatedId
if _, err := client.ReleaseEipAddress(releaseEipAddressRequest); err != nil {
ui.Say(fmt.Sprintf("Failed to release eip: %s", err))
}
}
func (s *stepConfigAlicloudEIP) waitForEipStatus(client *ClientWrapper, regionId string, allocationId string, expectedStatus string) error {
describeEipAddressesRequest := ecs.CreateDescribeEipAddressesRequest()
describeEipAddressesRequest.RegionId = regionId
describeEipAddressesRequest.AllocationId = s.allocatedId
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
response, err := client.DescribeEipAddresses(describeEipAddressesRequest)
if err == nil && len(response.EipAddresses.EipAddress) == 0 {
err = fmt.Errorf("eip allocated is not find")
}
return response, err
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
eipAddressesResponse := response.(*ecs.DescribeEipAddressesResponse)
eipAddresses := eipAddressesResponse.EipAddresses.EipAddress
for _, eipAddress := range eipAddresses {
if eipAddress.Status == expectedStatus {
return WaitForExpectSuccess
}
}
return WaitForExpectToRetry
},
RetryTimes: shortRetryTimes,
})
return err
}
func (s *stepConfigAlicloudEIP) buildAllocateEipAddressRequest(state multistep.StateBag) *ecs.AllocateEipAddressRequest {
instance := state.Get("instance").(*ecs.Instance)
request := ecs.CreateAllocateEipAddressRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.RegionId = instance.RegionId
request.InternetChargeType = s.InternetChargeType
request.Bandwidth = string(convertNumber(s.InternetMaxBandwidthOut))
return request
}
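
The EIP step polls through client.WaitForExpected, passing a RequestFunc, an EvalFunc and a RetryTimes bound, instead of the old WaitForEip call. A rough sketch of the retry loop such a wrapper could implement, assuming a fixed sleep between attempts and that the eval result values compare with == (the real ClientWrapper method may use different intervals and defaults):

// Sketch only, not the ClientWrapper implementation: call RequestFunc until
// EvalFunc reports success or the retry budget runs out.
func waitForExpectedSketch(args *WaitForExpectArgs) (responses.AcsResponse, error) {
	var response responses.AcsResponse
	var err error
	for i := 0; i < args.RetryTimes; i++ {
		response, err = args.RequestFunc()
		if args.EvalFunc(response, err) == WaitForExpectSuccess {
			return response, nil
		}
		time.Sleep(5 * time.Second) // assumed retry interval
	}
	if err == nil {
		err = fmt.Errorf("still not in the expected state after %d attempts", args.RetryTimes)
	}
	return response, err
}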

View File

@ -1,82 +1,74 @@
package ecs
import (
"context"
"fmt"
"io/ioutil"
"os"
"runtime"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type StepConfigAlicloudKeyPair struct {
Debug bool
SSHAgentAuth bool
DebugKeyPath string
TemporaryKeyPairName string
KeyPairName string
PrivateKeyFile string
RegionId string
type stepConfigAlicloudKeyPair struct {
Debug bool
Comm *communicator.Config
DebugKeyPath string
RegionId string
keyName string
}
func (s *StepConfigAlicloudKeyPair) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepConfigAlicloudKeyPair) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
if s.PrivateKeyFile != "" {
if s.Comm.SSHPrivateKeyFile != "" {
ui.Say("Using existing SSH private key")
privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
privateKeyBytes, err := s.Comm.ReadSSHPrivateKeyFile()
if err != nil {
state.Put("error", fmt.Errorf(
"Error loading configured private key file: %s", err))
state.Put("error", err)
return multistep.ActionHalt
}
state.Put("keyPair", s.KeyPairName)
state.Put("privateKey", string(privateKeyBytes))
s.Comm.SSHPrivateKey = privateKeyBytes
return multistep.ActionContinue
}
if s.SSHAgentAuth && s.KeyPairName == "" {
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName == "" {
ui.Say("Using SSH Agent with key pair in source image")
return multistep.ActionContinue
}
if s.SSHAgentAuth && s.KeyPairName != "" {
ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.KeyPairName))
state.Put("keyPair", s.KeyPairName)
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName != "" {
ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.Comm.SSHKeyPairName))
return multistep.ActionContinue
}
if s.TemporaryKeyPairName == "" {
if s.Comm.SSHTemporaryKeyPairName == "" {
ui.Say("Not using temporary keypair")
state.Put("keyPair", "")
s.Comm.SSHKeyPairName = ""
return multistep.ActionContinue
}
client := state.Get("client").(*ecs.Client)
client := state.Get("client").(*ClientWrapper)
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.Comm.SSHTemporaryKeyPairName))
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName))
keyResp, err := client.CreateKeyPair(&ecs.CreateKeyPairArgs{
KeyPairName: s.TemporaryKeyPairName,
RegionId: common.Region(s.RegionId),
})
createKeyPairRequest := ecs.CreateCreateKeyPairRequest()
createKeyPairRequest.RegionId = s.RegionId
createKeyPairRequest.KeyPairName = s.Comm.SSHTemporaryKeyPairName
keyResp, err := client.CreateKeyPair(createKeyPairRequest)
if err != nil {
state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
return multistep.ActionHalt
return halt(state, err, "Error creating temporary keypair")
}
// Set the keyname so we know to delete it later
s.keyName = s.TemporaryKeyPairName
s.keyName = s.Comm.SSHTemporaryKeyPairName
// Set some state data for use in future steps
state.Put("keyPair", s.keyName)
state.Put("privateKey", keyResp.PrivateKeyBody)
s.Comm.SSHKeyPairName = s.keyName
s.Comm.SSHPrivateKey = []byte(keyResp.PrivateKeyBody)
// If we're in debug mode, output the private key to the working
// directory.
@ -107,23 +99,24 @@ func (s *StepConfigAlicloudKeyPair) Run(state multistep.StateBag) multistep.Step
return multistep.ActionContinue
}
func (s *StepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) {
func (s *stepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) {
// If no key name is set, then we never created it, so just return
// If we used an SSH private key file, do not go about deleting
// keypairs
if s.PrivateKeyFile != "" || (s.KeyPairName == "" && s.keyName == "") {
if s.Comm.SSHPrivateKeyFile != "" || (s.Comm.SSHKeyPairName == "" && s.keyName == "") {
return
}
client := state.Get("client").(*ecs.Client)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
// Remove the keypair
ui.Say("Deleting temporary keypair...")
err := client.DeleteKeyPairs(&ecs.DeleteKeyPairsArgs{
RegionId: common.Region(s.RegionId),
KeyPairNames: "[\"" + s.keyName + "\"]",
})
deleteKeyPairsRequest := ecs.CreateDeleteKeyPairsRequest()
deleteKeyPairsRequest.RegionId = s.RegionId
deleteKeyPairsRequest.KeyPairNames = fmt.Sprintf("[\"%s\"]", s.keyName)
_, err := client.DeleteKeyPairs(deleteKeyPairsRequest)
if err != nil {
ui.Error(fmt.Sprintf(
"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))

View File

@ -1,32 +1,45 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepConfigAlicloudPublicIP struct {
publicIPAdress string
RegionId string
publicIPAddress string
RegionId string
SSHPrivateIp bool
}
func (s *stepConfigAlicloudPublicIP) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
func (s *stepConfigAlicloudPublicIP) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
instance := state.Get("instance").(*ecs.Instance)
ipaddress, err := client.AllocatePublicIpAddress(instance.InstanceId)
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error allocating public ip: %s", err))
return multistep.ActionHalt
if s.SSHPrivateIp {
ipaddress := instance.InnerIpAddress.IpAddress
if len(ipaddress) == 0 {
ui.Say("Failed to get private ip of instance")
return multistep.ActionHalt
}
state.Put("ipaddress", ipaddress[0])
return multistep.ActionContinue
}
s.publicIPAdress = ipaddress
ui.Say(fmt.Sprintf("Allocated public ip address %s.", ipaddress))
state.Put("ipaddress", ipaddress)
allocatePublicIpAddressRequest := ecs.CreateAllocatePublicIpAddressRequest()
allocatePublicIpAddressRequest.InstanceId = instance.InstanceId
ipaddress, err := client.AllocatePublicIpAddress(allocatePublicIpAddressRequest)
if err != nil {
return halt(state, err, "Error allocating public ip")
}
s.publicIPAddress = ipaddress.IpAddress
ui.Say(fmt.Sprintf("Allocated public ip address %s.", ipaddress.IpAddress))
state.Put("ipaddress", ipaddress.IpAddress)
return multistep.ActionContinue
}
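
Both the EIP and public-IP steps now honor the ssh-private-ip option by publishing the instance's private address instead of allocating a public one. A hedged sketch of that selection pulled out into one helper; the function name, parameters and the branch that reads an existing public address are illustrative, not from the diff:

// Sketch: choose which address to store under "ipaddress" for the communicator.
func pickAddress(instance *ecs.Instance, usePrivateIp bool, vpcNetwork bool) (string, error) {
	if usePrivateIp {
		private := instance.InnerIpAddress.IpAddress
		if vpcNetwork {
			private = instance.VpcAttributes.PrivateIpAddress.IpAddress
		}
		if len(private) == 0 {
			return "", fmt.Errorf("instance %s has no private ip", instance.InstanceId)
		}
		return private[0], nil
	}
	public := instance.PublicIpAddress.IpAddress
	if len(public) == 0 {
		return "", fmt.Errorf("instance %s has no public ip", instance.InstanceId)
	}
	return public[0], nil
}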

View File

@ -1,14 +1,14 @@
package ecs
import (
"errors"
"context"
"fmt"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepConfigAlicloudSecurityGroup struct {
@ -20,31 +20,34 @@ type stepConfigAlicloudSecurityGroup struct {
isCreate bool
}
func (s *stepConfigAlicloudSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
var createSecurityGroupRetryErrors = []string{
"IdempotentProcessing",
}
var deleteSecurityGroupRetryErrors = []string{
"DependencyViolation",
}
func (s *stepConfigAlicloudSecurityGroup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
networkType := state.Get("networktype").(InstanceNetWork)
var securityGroupItems []ecs.SecurityGroupItemType
var err error
if len(s.SecurityGroupId) != 0 {
if networkType == VpcNet {
describeSecurityGroupsRequest := ecs.CreateDescribeSecurityGroupsRequest()
describeSecurityGroupsRequest.RegionId = s.RegionId
if networkType == InstanceNetworkVpc {
vpcId := state.Get("vpcid").(string)
securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
VpcId: vpcId,
RegionId: common.Region(s.RegionId),
})
} else {
securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
RegionId: common.Region(s.RegionId),
})
describeSecurityGroupsRequest.VpcId = vpcId
}
securityGroupsResponse, err := client.DescribeSecurityGroups(describeSecurityGroupsRequest)
if err != nil {
ui.Say(fmt.Sprintf("Failed querying security group: %s", err))
state.Put("error", err)
return multistep.ActionHalt
return halt(state, err, "Failed querying security group")
}
securityGroupItems := securityGroupsResponse.SecurityGroups.SecurityGroup
for _, securityGroupItem := range securityGroupItems {
if securityGroupItem.SecurityGroupId == s.SecurityGroupId {
state.Put("securitygroupid", s.SecurityGroupId)
@ -52,61 +55,55 @@ func (s *stepConfigAlicloudSecurityGroup) Run(state multistep.StateBag) multiste
return multistep.ActionContinue
}
}
s.isCreate = false
message := fmt.Sprintf("The specified security group {%s} doesn't exist.", s.SecurityGroupId)
state.Put("error", errors.New(message))
ui.Say(message)
return multistep.ActionHalt
s.isCreate = false
err = fmt.Errorf("The specified security group {%s} doesn't exist.", s.SecurityGroupId)
return halt(state, err, "")
}
var securityGroupId string
ui.Say("Creating security groups...")
if networkType == VpcNet {
vpcId := state.Get("vpcid").(string)
securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
RegionId: common.Region(s.RegionId),
SecurityGroupName: s.SecurityGroupName,
VpcId: vpcId,
})
} else {
securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
RegionId: common.Region(s.RegionId),
SecurityGroupName: s.SecurityGroupName,
})
}
ui.Say("Creating security group...")
createSecurityGroupRequest := s.buildCreateSecurityGroupRequest(state)
securityGroupResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.CreateSecurityGroup(createSecurityGroupRequest)
},
EvalFunc: client.EvalCouldRetryResponse(createSecurityGroupRetryErrors, EvalRetryErrorType),
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed creating security group %s.", err))
return multistep.ActionHalt
return halt(state, err, "Failed creating security group")
}
securityGroupId := securityGroupResponse.(*ecs.CreateSecurityGroupResponse).SecurityGroupId
ui.Message(fmt.Sprintf("Created security group: %s", securityGroupId))
state.Put("securitygroupid", securityGroupId)
s.isCreate = true
s.SecurityGroupId = securityGroupId
err = client.AuthorizeSecurityGroupEgress(&ecs.AuthorizeSecurityGroupEgressArgs{
SecurityGroupId: securityGroupId,
RegionId: common.Region(s.RegionId),
IpProtocol: ecs.IpProtocolAll,
PortRange: "-1/-1",
NicType: ecs.NicTypeInternet,
DestCidrIp: "0.0.0.0/0", //The input parameter "DestGroupId" or "DestCidrIp" cannot be both blank.
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
return multistep.ActionHalt
authorizeSecurityGroupEgressRequest := ecs.CreateAuthorizeSecurityGroupEgressRequest()
authorizeSecurityGroupEgressRequest.SecurityGroupId = securityGroupId
authorizeSecurityGroupEgressRequest.RegionId = s.RegionId
authorizeSecurityGroupEgressRequest.IpProtocol = IpProtocolAll
authorizeSecurityGroupEgressRequest.PortRange = DefaultPortRange
authorizeSecurityGroupEgressRequest.NicType = NicTypeInternet
authorizeSecurityGroupEgressRequest.DestCidrIp = DefaultCidrIp
if _, err := client.AuthorizeSecurityGroupEgress(authorizeSecurityGroupEgressRequest); err != nil {
return halt(state, err, "Failed authorizing security group")
}
err = client.AuthorizeSecurityGroup(&ecs.AuthorizeSecurityGroupArgs{
SecurityGroupId: securityGroupId,
RegionId: common.Region(s.RegionId),
IpProtocol: ecs.IpProtocolAll,
PortRange: "-1/-1",
NicType: ecs.NicTypeInternet,
SourceCidrIp: "0.0.0.0/0", //The input parameter "SourceGroupId" or "SourceCidrIp" cannot be both blank.
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
return multistep.ActionHalt
authorizeSecurityGroupRequest := ecs.CreateAuthorizeSecurityGroupRequest()
authorizeSecurityGroupRequest.SecurityGroupId = securityGroupId
authorizeSecurityGroupRequest.RegionId = s.RegionId
authorizeSecurityGroupRequest.IpProtocol = IpProtocolAll
authorizeSecurityGroupRequest.PortRange = DefaultPortRange
authorizeSecurityGroupRequest.NicType = NicTypeInternet
authorizeSecurityGroupRequest.SourceCidrIp = DefaultCidrIp
if _, err := client.AuthorizeSecurityGroup(authorizeSecurityGroupRequest); err != nil {
return halt(state, err, "Failed authorizing security group")
}
return multistep.ActionContinue
@ -117,21 +114,39 @@ func (s *stepConfigAlicloudSecurityGroup) Cleanup(state multistep.StateBag) {
return
}
client := state.Get("client").(*ecs.Client)
cleanUpMessage(state, "security group")
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
message(state, "security group")
timeoutPoint := time.Now().Add(120 * time.Second)
for {
if err := client.DeleteSecurityGroup(common.Region(s.RegionId), s.SecurityGroupId); err != nil {
e, _ := err.(*common.Error)
if e.Code == "DependencyViolation" && time.Now().Before(timeoutPoint) {
time.Sleep(5 * time.Second)
continue
}
ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))
return
}
break
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDeleteSecurityGroupRequest()
request.RegionId = s.RegionId
request.SecurityGroupId = s.SecurityGroupId
return client.DeleteSecurityGroup(request)
},
EvalFunc: client.EvalCouldRetryResponse(deleteSecurityGroupRetryErrors, EvalRetryErrorType),
RetryTimes: shortRetryTimes,
})
if err != nil {
ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))
}
}
func (s *stepConfigAlicloudSecurityGroup) buildCreateSecurityGroupRequest(state multistep.StateBag) *ecs.CreateSecurityGroupRequest {
networkType := state.Get("networktype").(InstanceNetWork)
request := ecs.CreateCreateSecurityGroupRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.RegionId = s.RegionId
request.SecurityGroupName = s.SecurityGroupName
if networkType == InstanceNetworkVpc {
vpcId := state.Get("vpcid").(string)
request.VpcId = vpcId
}
return request
}
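
The egress and ingress authorization calls above use package constants in place of the literals from the removed code. Their assumed values, inferred from those removed lines (the actual definitions live elsewhere in this package):

// Assumed values, mirroring the old literals ("-1/-1", "0.0.0.0/0", internet NIC, all protocols).
const (
	IpProtocolAll    = "all"
	NicTypeInternet  = "internet"
	DefaultPortRange = "-1/-1"
	DefaultCidrIp    = "0.0.0.0/0"
)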

View File

@ -1,14 +1,15 @@
package ecs
import (
"errors"
"context"
errorsNew "errors"
"fmt"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepConfigAlicloudVPC struct {
@ -18,54 +19,94 @@ type stepConfigAlicloudVPC struct {
isCreate bool
}
func (s *stepConfigAlicloudVPC) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(Config)
client := state.Get("client").(*ecs.Client)
var createVpcRetryErrors = []string{
"TOKEN_PROCESSING",
}
var deleteVpcRetryErrors = []string{
"DependencyViolation.Instance",
"DependencyViolation.RouteEntry",
"DependencyViolation.VSwitch",
"DependencyViolation.SecurityGroup",
"Forbbiden",
"TaskConflict",
}
func (s *stepConfigAlicloudVPC) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
if len(s.VpcId) != 0 {
vpcs, _, err := client.DescribeVpcs(&ecs.DescribeVpcsArgs{
VpcId: s.VpcId,
RegionId: common.Region(config.AlicloudRegion),
})
describeVpcsRequest := ecs.CreateDescribeVpcsRequest()
describeVpcsRequest.VpcId = s.VpcId
describeVpcsRequest.RegionId = config.AlicloudRegion
vpcsResponse, err := client.DescribeVpcs(describeVpcsRequest)
if err != nil {
ui.Say(fmt.Sprintf("Failed querying vpcs: %s", err))
state.Put("error", err)
return multistep.ActionHalt
return halt(state, err, "Failed querying vpcs")
}
vpcs := vpcsResponse.Vpcs.Vpc
if len(vpcs) > 0 {
vpc := vpcs[0]
state.Put("vpcid", vpc.VpcId)
state.Put("vpcid", vpcs[0].VpcId)
s.isCreate = false
return multistep.ActionContinue
}
message := fmt.Sprintf("The specified vpc {%s} doesn't exist.", s.VpcId)
state.Put("error", errors.New(message))
ui.Say(message)
return multistep.ActionHalt
message := fmt.Sprintf("The specified vpc {%s} doesn't exist.", s.VpcId)
return halt(state, errorsNew.New(message), "")
}
ui.Say("Creating vpc")
vpc, err := client.CreateVpc(&ecs.CreateVpcArgs{
RegionId: common.Region(config.AlicloudRegion),
CidrBlock: s.CidrBlock,
VpcName: s.VpcName,
ui.Say("Creating vpc...")
createVpcRequest := s.buildCreateVpcRequest(state)
createVpcResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.CreateVpc(createVpcRequest)
},
EvalFunc: client.EvalCouldRetryResponse(createVpcRetryErrors, EvalRetryErrorType),
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed creating vpc: %s", err))
return multistep.ActionHalt
}
err = client.WaitForVpcAvailable(common.Region(config.AlicloudRegion), vpc.VpcId, ALICLOUD_DEFAULT_SHORT_TIMEOUT)
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed waiting for vpc to become available: %s", err))
return multistep.ActionHalt
return halt(state, err, "Failed creating vpc")
}
state.Put("vpcid", vpc.VpcId)
vpcId := createVpcResponse.(*ecs.CreateVpcResponse).VpcId
_, err = client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDescribeVpcsRequest()
request.RegionId = config.AlicloudRegion
request.VpcId = vpcId
return client.DescribeVpcs(request)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
vpcsResponse := response.(*ecs.DescribeVpcsResponse)
vpcs := vpcsResponse.Vpcs.Vpc
if len(vpcs) > 0 {
for _, vpc := range vpcs {
if vpc.Status == VpcStatusAvailable {
return WaitForExpectSuccess
}
}
}
return WaitForExpectToRetry
},
RetryTimes: shortRetryTimes,
})
if err != nil {
return halt(state, err, "Failed waiting for vpc to become available")
}
ui.Message(fmt.Sprintf("Created vpc: %s", vpcId))
state.Put("vpcid", vpcId)
s.isCreate = true
s.VpcId = vpc.VpcId
s.VpcId = vpcId
return multistep.ActionContinue
}
@ -74,23 +115,34 @@ func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) {
return
}
client := state.Get("client").(*ecs.Client)
cleanUpMessage(state, "VPC")
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
message(state, "VPC")
timeoutPoint := time.Now().Add(60 * time.Second)
for {
if err := client.DeleteVpc(s.VpcId); err != nil {
e, _ := err.(*common.Error)
if (e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" ||
e.Code == "DependencyViolation.VSwitch" ||
e.Code == "DependencyViolation.SecurityGroup") && time.Now().Before(timeoutPoint) {
time.Sleep(1 * time.Second)
continue
}
ui.Error(fmt.Sprintf("Error deleting vpc, it may still be around: %s", err))
return
}
break
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDeleteVpcRequest()
request.VpcId = s.VpcId
return client.DeleteVpc(request)
},
EvalFunc: client.EvalCouldRetryResponse(deleteVpcRetryErrors, EvalRetryErrorType),
RetryTimes: shortRetryTimes,
})
if err != nil {
ui.Error(fmt.Sprintf("Error deleting vpc, it may still be around: %s", err))
}
}
func (s *stepConfigAlicloudVPC) buildCreateVpcRequest(state multistep.StateBag) *ecs.CreateVpcRequest {
config := state.Get("config").(*Config)
request := ecs.CreateCreateVpcRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.RegionId = config.AlicloudRegion
request.CidrBlock = s.CidrBlock
request.VpcName = s.VpcName
return request
}
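
Create and delete calls in this step retry only on the API error codes listed in createVpcRetryErrors and deleteVpcRetryErrors, via EvalCouldRetryResponse. A simplified sketch of the check that evaluator presumably performs, using the SDK's errors package (github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors, imported in the EIP step above); the real helper also takes an evalErrorType argument and covers more cases:

// Sketch only: is this SDK error one of the codes treated as transient?
func isRetryableCode(err error, retryCodes []string) bool {
	e, ok := err.(errors.Error)
	if !ok {
		return false
	}
	for _, code := range retryCodes {
		if e.ErrorCode() == code {
			return true
		}
	}
	return false
}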

View File

@ -1,14 +1,14 @@
package ecs
import (
"errors"
"context"
"fmt"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepConfigAlicloudVSwitch struct {
@ -19,52 +19,65 @@ type stepConfigAlicloudVSwitch struct {
VSwitchName string
}
func (s *stepConfigAlicloudVSwitch) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
var createVSwitchRetryErrors = []string{
"TOKEN_PROCESSING",
}
var deleteVSwitchRetryErrors = []string{
"IncorrectVSwitchStatus",
"DependencyViolation",
"DependencyViolation.HaVip",
"IncorrectRouteEntryStatus",
"TaskConflict",
}
func (s *stepConfigAlicloudVSwitch) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
vpcId := state.Get("vpcid").(string)
config := state.Get("config").(Config)
config := state.Get("config").(*Config)
if len(s.VSwitchId) != 0 {
vswitchs, _, err := client.DescribeVSwitches(&ecs.DescribeVSwitchesArgs{
VpcId: vpcId,
VSwitchId: s.VSwitchId,
ZoneId: s.ZoneId,
})
describeVSwitchesRequest := ecs.CreateDescribeVSwitchesRequest()
describeVSwitchesRequest.VpcId = vpcId
describeVSwitchesRequest.VSwitchId = s.VSwitchId
describeVSwitchesRequest.ZoneId = s.ZoneId
vswitchesResponse, err := client.DescribeVSwitches(describeVSwitchesRequest)
if err != nil {
ui.Say(fmt.Sprintf("Failed querying vswitch: %s", err))
state.Put("error", err)
return multistep.ActionHalt
return halt(state, err, "Failed querying vswitch")
}
if len(vswitchs) > 0 {
vswitch := vswitchs[0]
state.Put("vswitchid", vswitch.VSwitchId)
vswitch := vswitchesResponse.VSwitches.VSwitch
if len(vswitch) > 0 {
state.Put("vswitchid", vswitch[0].VSwitchId)
s.isCreate = false
return multistep.ActionContinue
}
s.isCreate = false
message := fmt.Sprintf("The specified vswitch {%s} doesn't exist.", s.VSwitchId)
state.Put("error", errors.New(message))
ui.Say(message)
return multistep.ActionHalt
return halt(state, fmt.Errorf("The specified vswitch {%s} doesn't exist.", s.VSwitchId), "")
}
if s.ZoneId == "" {
zones, err := client.DescribeZones(common.Region(config.AlicloudRegion))
if s.ZoneId == "" {
describeZonesRequest := ecs.CreateDescribeZonesRequest()
describeZonesRequest.RegionId = config.AlicloudRegion
zonesResponse, err := client.DescribeZones(describeZonesRequest)
if err != nil {
ui.Say(fmt.Sprintf("Query for available zones failed: %s", err))
state.Put("error", err)
return multistep.ActionHalt
return halt(state, err, "Query for available zones failed")
}
var instanceTypes []string
zones := zonesResponse.Zones.Zone
for _, zone := range zones {
isVSwitchSupported := false
for _, resourceType := range zone.AvailableResourceCreation.ResourceTypes {
if resourceType == ecs.ResourceTypeVSwitch {
if resourceType == "VSwitch" {
isVSwitchSupported = true
}
}
if isVSwitchSupported {
for _, instanceType := range zone.AvailableInstanceTypes.InstanceTypes {
if instanceType == config.InstanceType {
@ -96,29 +109,62 @@ func (s *stepConfigAlicloudVSwitch) Run(state multistep.StateBag) multistep.Step
}
}
}
if config.CidrBlock == "" {
s.CidrBlock = "172.16.0.0/24" //use the default CirdBlock
s.CidrBlock = DefaultCidrBlock // use the default CidrBlock
}
ui.Say("Creating vswitch...")
vswitchId, err := client.CreateVSwitch(&ecs.CreateVSwitchArgs{
CidrBlock: s.CidrBlock,
ZoneId: s.ZoneId,
VpcId: vpcId,
VSwitchName: s.VSwitchName,
createVSwitchRequest := s.buildCreateVSwitchRequest(state)
createVSwitchResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.CreateVSwitch(createVSwitchRequest)
},
EvalFunc: client.EvalCouldRetryResponse(createVSwitchRetryErrors, EvalRetryErrorType),
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Create vswitch failed %v", err))
return multistep.ActionHalt
return halt(state, err, "Error Creating vswitch")
}
if err := client.WaitForVSwitchAvailable(vpcId, s.VSwitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
state.Put("error", err)
ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become available: %v", err))
return multistep.ActionHalt
vSwitchId := createVSwitchResponse.(*ecs.CreateVSwitchResponse).VSwitchId
describeVSwitchesRequest := ecs.CreateDescribeVSwitchesRequest()
describeVSwitchesRequest.VpcId = vpcId
describeVSwitchesRequest.VSwitchId = vSwitchId
_, err = client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.DescribeVSwitches(describeVSwitchesRequest)
},
EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
if err != nil {
return WaitForExpectToRetry
}
vSwitchesResponse := response.(*ecs.DescribeVSwitchesResponse)
vSwitches := vSwitchesResponse.VSwitches.VSwitch
if len(vSwitches) > 0 {
for _, vSwitch := range vSwitches {
if vSwitch.Status == VSwitchStatusAvailable {
return WaitForExpectSuccess
}
}
}
return WaitForExpectToRetry
},
RetryTimes: shortRetryTimes,
})
if err != nil {
return halt(state, err, "Timeout waiting for vswitch to become available")
}
state.Put("vswitchid", vswitchId)
ui.Message(fmt.Sprintf("Created vswitch: %s", vSwitchId))
state.Put("vswitchid", vSwitchId)
s.isCreate = true
s.VSwitchId = vswitchId
s.VSwitchId = vSwitchId
return multistep.ActionContinue
}
@ -127,22 +173,35 @@ func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
return
}
client := state.Get("client").(*ecs.Client)
cleanUpMessage(state, "vSwitch")
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
message(state, "vSwitch")
timeoutPoint := time.Now().Add(10 * time.Second)
for {
if err := client.DeleteVSwitch(s.VSwitchId); err != nil {
e, _ := err.(*common.Error)
if (e.Code == "IncorrectVSwitchStatus" || e.Code == "DependencyViolation" ||
e.Code == "DependencyViolation.HaVip" ||
e.Code == "IncorretRouteEntryStatus") && time.Now().Before(timeoutPoint) {
time.Sleep(1 * time.Second)
continue
}
ui.Error(fmt.Sprintf("Error deleting vswitch, it may still be around: %s", err))
return
}
break
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDeleteVSwitchRequest()
request.VSwitchId = s.VSwitchId
return client.DeleteVSwitch(request)
},
EvalFunc: client.EvalCouldRetryResponse(deleteVSwitchRetryErrors, EvalRetryErrorType),
RetryTimes: shortRetryTimes,
})
if err != nil {
ui.Error(fmt.Sprintf("Error deleting vswitch, it may still be around: %s", err))
}
}
func (s *stepConfigAlicloudVSwitch) buildCreateVSwitchRequest(state multistep.StateBag) *ecs.CreateVSwitchRequest {
vpcId := state.Get("vpcid").(string)
request := ecs.CreateCreateVSwitchRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.CidrBlock = s.CidrBlock
request.ZoneId = s.ZoneId
request.VpcId = vpcId
request.VSwitchName = s.VSwitchName
return request
}
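
When no zone id is configured, the step walks the DescribeZones output looking for a zone that can create vswitches and that offers the requested instance type. A condensed, illustrative version of that selection (helper name is not from the diff):

// Sketch: pick the first zone that supports vswitch creation and the instance type.
func pickZone(response *ecs.DescribeZonesResponse, instanceType string) string {
	for _, zone := range response.Zones.Zone {
		supportsVSwitch := false
		for _, resourceType := range zone.AvailableResourceCreation.ResourceTypes {
			if resourceType == "VSwitch" {
				supportsVSwitch = true
				break
			}
		}
		if !supportsVSwitch {
			continue
		}
		for _, available := range zone.AvailableInstanceTypes.InstanceTypes {
			if available == instanceType {
				return zone.ZoneId
			}
		}
	}
	return ""
}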

View File

@ -1,70 +1,77 @@
package ecs
import (
"context"
"fmt"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/packer/common/random"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepCreateAlicloudImage struct {
image *ecs.ImageType
AlicloudImageIgnoreDataDisks bool
WaitSnapshotReadyTimeout int
image *ecs.Image
}
func (s *stepCreateAlicloudImage) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(Config)
client := state.Get("client").(*ecs.Client)
var createImageRetryErrors = []string{
"IdempotentProcessing",
}
func (s *stepCreateAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
// Create the alicloud image
ui.Say(fmt.Sprintf("Creating image: %s", config.AlicloudImageName))
var imageId string
var err error
instance := state.Get("instance").(*ecs.InstanceAttributesType)
imageId, err = client.CreateImage(&ecs.CreateImageArgs{
RegionId: common.Region(config.AlicloudRegion),
InstanceId: instance.InstanceId,
ImageName: config.AlicloudImageName,
ImageVersion: config.AlicloudImageVersion,
Description: config.AlicloudImageDescription})
if err != nil {
err := fmt.Errorf("Error creating image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
err = client.WaitForImageReady(common.Region(config.AlicloudRegion),
imageId, ALICLOUD_DEFAULT_LONG_TIMEOUT)
if err != nil {
err := fmt.Errorf("Timeout waiting for image to be created: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
tempImageName := config.AlicloudImageName
if config.ImageEncrypted.True() {
tempImageName = fmt.Sprintf("packer_%s", random.AlphaNum(7))
ui.Say(fmt.Sprintf("Creating temporary image for encryption: %s", tempImageName))
} else {
ui.Say(fmt.Sprintf("Creating image: %s", tempImageName))
}
images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
RegionId: common.Region(config.AlicloudRegion),
ImageId: imageId})
createImageRequest := s.buildCreateImageRequest(state, tempImageName)
createImageResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.CreateImage(createImageRequest)
},
EvalFunc: client.EvalCouldRetryResponse(createImageRetryErrors, EvalRetryErrorType),
})
if err != nil {
err := fmt.Errorf("Error querying created image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "Error creating image")
}
imageId := createImageResponse.(*ecs.CreateImageResponse).ImageId
imagesResponse, err := client.WaitForImageStatus(config.AlicloudRegion, imageId, ImageStatusAvailable, time.Duration(s.WaitSnapshotReadyTimeout)*time.Second)
// save the image first so it can be cleaned up if the wait above timed out
images := imagesResponse.(*ecs.DescribeImagesResponse).Images.Image
if len(images) == 0 {
err := fmt.Errorf("Unable to find created image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "Unable to find created image")
}
s.image = &images[0]
if err != nil {
return halt(state, err, "Timeout waiting for image to be created")
}
var snapshotIds []string
for _, device := range images[0].DiskDeviceMappings.DiskDeviceMapping {
snapshotIds = append(snapshotIds, device.SnapshotId)
}
state.Put("alicloudimage", imageId)
state.Put("alicloudsnapshots", snapshotIds)
alicloudImages := make(map[string]string)
alicloudImages[config.AlicloudRegion] = images[0].ImageId
state.Put("alicloudimages", alicloudImages)
@ -76,19 +83,62 @@ func (s *stepCreateAlicloudImage) Cleanup(state multistep.StateBag) {
if s.image == nil {
return
}
config := state.Get("config").(*Config)
encryptedSet := config.ImageEncrypted.True()
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if !cancelled && !halted {
if !cancelled && !halted && !encryptedSet {
return
}
client := state.Get("client").(*ecs.Client)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
config := state.Get("config").(Config)
ui.Say("Deleting the image because of cancellation or error...")
if err := client.DeleteImage(common.Region(config.AlicloudRegion), s.image.ImageId); err != nil {
if !cancelled && !halted && encryptedSet {
ui.Say(fmt.Sprintf("Deleting temporary image %s(%s) and related snapshots after finishing encryption...", s.image.ImageId, s.image.ImageName))
} else {
ui.Say("Deleting the image and related snapshots because of cancellation or error...")
}
deleteImageRequest := ecs.CreateDeleteImageRequest()
deleteImageRequest.RegionId = config.AlicloudRegion
deleteImageRequest.ImageId = s.image.ImageId
if _, err := client.DeleteImage(deleteImageRequest); err != nil {
ui.Error(fmt.Sprintf("Error deleting image, it may still be around: %s", err))
return
}
//Delete the snapshot of this image
for _, diskDevices := range s.image.DiskDeviceMappings.DiskDeviceMapping {
deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
deleteSnapshotRequest.SnapshotId = diskDevices.SnapshotId
if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
ui.Error(fmt.Sprintf("Error deleting snapshot, it may still be around: %s", err))
return
}
}
}
func (s *stepCreateAlicloudImage) buildCreateImageRequest(state multistep.StateBag, imageName string) *ecs.CreateImageRequest {
config := state.Get("config").(*Config)
request := ecs.CreateCreateImageRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.RegionId = config.AlicloudRegion
request.ImageName = imageName
request.ImageVersion = config.AlicloudImageVersion
request.Description = config.AlicloudImageDescription
if s.AlicloudImageIgnoreDataDisks {
snapshotId := state.Get("alicloudsnapshot").(string)
request.SnapshotId = snapshotId
} else {
instance := state.Get("instance").(*ecs.Instance)
request.InstanceId = instance.InstanceId
}
return request
}
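
Besides keeping a handle on the created image for Cleanup, the step records several state-bag keys that later steps consume (the tagging step below reads two of them back). Illustrative sketch using the same keys written above:

// Sketch: how a downstream step reads what stepCreateAlicloudImage stored.
func readImageState(state multistep.StateBag) (string, []string, map[string]string) {
	imageId := state.Get("alicloudimage").(string)
	snapshotIds := state.Get("alicloudsnapshots").([]string)
	imagesByRegion := state.Get("alicloudimages").(map[string]string)
	return imageId, snapshotIds, imagesByRegion
}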

View File

@ -1,18 +1,24 @@
package ecs
import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"strconv"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/packer/common/uuid"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
confighelper "github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepCreateAlicloudInstance struct {
IOOptimized bool
IOOptimized confighelper.Trilean
InstanceType string
UserData string
UserDataFile string
@ -20,100 +26,57 @@ type stepCreateAlicloudInstance struct {
RegionId string
InternetChargeType string
InternetMaxBandwidthOut int
InstnaceName string
InstanceName string
ZoneId string
instance *ecs.InstanceAttributesType
instance *ecs.Instance
}
func (s *stepCreateAlicloudInstance) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
ui := state.Get("ui").(packer.Ui)
source_image := state.Get("source_image").(*ecs.ImageType)
network_type := state.Get("networktype").(InstanceNetWork)
securityGroupId := state.Get("securitygroupid").(string)
var instanceId string
var err error
var createInstanceRetryErrors = []string{
"IdempotentProcessing",
}
ioOptimized := ecs.IoOptimizedNone
if s.IOOptimized {
ioOptimized = ecs.IoOptimizedOptimized
}
password := config.Comm.SSHPassword
if password == "" && config.Comm.WinRMPassword != "" {
password = config.Comm.WinRMPassword
}
ui.Say("Creating instance.")
if network_type == VpcNet {
userData, err := s.getUserData(state)
if err != nil {
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
vswitchId := state.Get("vswitchid").(string)
instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
RegionId: common.Region(s.RegionId),
ImageId: source_image.ImageId,
InstanceType: s.InstanceType,
InternetChargeType: common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
UserData: userData,
IoOptimized: ioOptimized,
VSwitchId: vswitchId,
SecurityGroupId: securityGroupId,
InstanceName: s.InstnaceName,
Password: password,
ZoneId: s.ZoneId,
DataDisk: diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
})
if err != nil {
err := fmt.Errorf("Error creating instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
} else {
if s.InstanceType == "" {
s.InstanceType = "PayByTraffic"
}
if s.InternetMaxBandwidthOut == 0 {
s.InternetMaxBandwidthOut = 5
}
instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
RegionId: common.Region(s.RegionId),
ImageId: source_image.ImageId,
InstanceType: s.InstanceType,
InternetChargeType: common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
IoOptimized: ioOptimized,
SecurityGroupId: securityGroupId,
InstanceName: s.InstnaceName,
Password: password,
ZoneId: s.ZoneId,
DataDisk: diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
})
if err != nil {
err := fmt.Errorf("Error creating instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
err = client.WaitForInstance(instanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
var deleteInstanceRetryErrors = []string{
"IncorrectInstanceStatus.Initializing",
}
func (s *stepCreateAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
ui.Say("Creating instance...")
createInstanceRequest, err := s.buildCreateInstanceRequest(state)
if err != nil {
err := fmt.Errorf("Error creating instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "")
}
instance, err := client.DescribeInstanceAttribute(instanceId)
createInstanceResponse, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
return client.CreateInstance(createInstanceRequest)
},
EvalFunc: client.EvalCouldRetryResponse(createInstanceRetryErrors, EvalRetryErrorType),
})
if err != nil {
ui.Say(err.Error())
return multistep.ActionHalt
return halt(state, err, "Error creating instance")
}
s.instance = instance
state.Put("instance", instance)
instanceId := createInstanceResponse.(*ecs.CreateInstanceResponse).InstanceId
_, err = client.WaitForInstanceStatus(s.RegionId, instanceId, InstanceStatusStopped)
if err != nil {
return halt(state, err, "Error waiting create instance")
}
describeInstancesRequest := ecs.CreateDescribeInstancesRequest()
describeInstancesRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instanceId)
instances, err := client.DescribeInstances(describeInstancesRequest)
if err != nil {
return halt(state, err, "")
}
ui.Message(fmt.Sprintf("Created instance: %s", instanceId))
s.instance = &instances.Instances.Instance[0]
state.Put("instance", s.instance)
return multistep.ActionContinue
}
@ -122,42 +85,121 @@ func (s *stepCreateAlicloudInstance) Cleanup(state multistep.StateBag) {
if s.instance == nil {
return
}
message(state, "instance")
client := state.Get("client").(*ecs.Client)
cleanUpMessage(state, "instance")
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
err := client.DeleteInstance(s.instance.InstanceId)
_, err := client.WaitForExpected(&WaitForExpectArgs{
RequestFunc: func() (responses.AcsResponse, error) {
request := ecs.CreateDeleteInstanceRequest()
request.InstanceId = s.instance.InstanceId
request.Force = requests.NewBoolean(true)
return client.DeleteInstance(request)
},
EvalFunc: client.EvalCouldRetryResponse(deleteInstanceRetryErrors, EvalRetryErrorType),
RetryTimes: shortRetryTimes,
})
if err != nil {
ui.Say(fmt.Sprintf("Failed to clean up instance %s: %v", s.instance.InstanceId, err.Error()))
ui.Say(fmt.Sprintf("Failed to clean up instance %s: %s", s.instance.InstanceId, err))
}
}
func (s *stepCreateAlicloudInstance) buildCreateInstanceRequest(state multistep.StateBag) (*ecs.CreateInstanceRequest, error) {
request := ecs.CreateCreateInstanceRequest()
request.ClientToken = uuid.TimeOrderedUUID()
request.RegionId = s.RegionId
request.InstanceType = s.InstanceType
request.InstanceName = s.InstanceName
request.ZoneId = s.ZoneId
sourceImage := state.Get("source_image").(*ecs.Image)
request.ImageId = sourceImage.ImageId
securityGroupId := state.Get("securitygroupid").(string)
request.SecurityGroupId = securityGroupId
networkType := state.Get("networktype").(InstanceNetWork)
if networkType == InstanceNetworkVpc {
vswitchId := state.Get("vswitchid").(string)
request.VSwitchId = vswitchId
userData, err := s.getUserData(state)
if err != nil {
return nil, err
}
request.UserData = userData
} else {
if s.InternetChargeType == "" {
s.InternetChargeType = "PayByTraffic"
}
if s.InternetMaxBandwidthOut == 0 {
s.InternetMaxBandwidthOut = 5
}
}
request.InternetChargeType = s.InternetChargeType
request.InternetMaxBandwidthOut = requests.Integer(convertNumber(s.InternetMaxBandwidthOut))
if s.IOOptimized.True() {
request.IoOptimized = IOOptimizedOptimized
} else if s.IOOptimized.False() {
request.IoOptimized = IOOptimizedNone
}
config := state.Get("config").(*Config)
password := config.Comm.SSHPassword
if password == "" && config.Comm.WinRMPassword != "" {
password = config.Comm.WinRMPassword
}
request.Password = password
systemDisk := config.AlicloudImageConfig.ECSSystemDiskMapping
request.SystemDiskDiskName = systemDisk.DiskName
request.SystemDiskCategory = systemDisk.DiskCategory
request.SystemDiskSize = requests.Integer(convertNumber(systemDisk.DiskSize))
request.SystemDiskDescription = systemDisk.Description
imageDisks := config.AlicloudImageConfig.ECSImagesDiskMappings
var dataDisks []ecs.CreateInstanceDataDisk
for _, imageDisk := range imageDisks {
var dataDisk ecs.CreateInstanceDataDisk
dataDisk.DiskName = imageDisk.DiskName
dataDisk.Category = imageDisk.DiskCategory
dataDisk.Size = string(convertNumber(imageDisk.DiskSize))
dataDisk.SnapshotId = imageDisk.SnapshotId
dataDisk.Description = imageDisk.Description
dataDisk.DeleteWithInstance = strconv.FormatBool(imageDisk.DeleteWithInstance)
dataDisk.Device = imageDisk.Device
if imageDisk.Encrypted != confighelper.TriUnset {
dataDisk.Encrypted = strconv.FormatBool(imageDisk.Encrypted.True())
}
dataDisks = append(dataDisks, dataDisk)
}
request.DataDisk = &dataDisks
return request, nil
}
func (s *stepCreateAlicloudInstance) getUserData(state multistep.StateBag) (string, error) {
userData := s.UserData
if s.UserDataFile != "" {
data, err := ioutil.ReadFile(s.UserDataFile)
if err != nil {
return "", err
}
userData = string(data)
}
log.Printf("%s", userData)
if userData != "" {
userData = base64.StdEncoding.EncodeToString([]byte(userData))
}
return userData, nil
}
func diskDeviceToDiskType(diskDevices []AlicloudDiskDevice) []ecs.DataDiskType {
result := make([]ecs.DataDiskType, len(diskDevices))
for _, diskDevice := range diskDevices {
result = append(result, ecs.DataDiskType{
DiskName: diskDevice.DiskName,
Category: ecs.DiskCategory(diskDevice.DiskCategory),
Size: diskDevice.DiskSize,
SnapshotId: diskDevice.SnapshotId,
Description: diskDevice.Description,
DeleteWithInstance: diskDevice.DeleteWithInstance,
Device: diskDevice.Device,
})
}
return result
}
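
The request builder pushes every numeric option (bandwidth, disk sizes) through convertNumber before assigning it to a requests.Integer field, which the SDK models as a string type. A plausible sketch of that helper, assuming it simply formats positive values and leaves zero or negative values empty (the actual definition lives elsewhere in this package):

// Sketch only: requests.Integer is a string type, so numbers are formatted here.
func convertNumberSketch(value int) string {
	if value <= 0 {
		return ""
	}
	return strconv.Itoa(value)
}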

View File

@ -0,0 +1,90 @@
package ecs
import (
"context"
"fmt"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
type stepCreateAlicloudSnapshot struct {
snapshot *ecs.Snapshot
WaitSnapshotReadyTimeout int
}
func (s *stepCreateAlicloudSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.Instance)
describeDisksRequest := ecs.CreateDescribeDisksRequest()
describeDisksRequest.RegionId = config.AlicloudRegion
describeDisksRequest.InstanceId = instance.InstanceId
describeDisksRequest.DiskType = DiskTypeSystem
disksResponse, err := client.DescribeDisks(describeDisksRequest)
if err != nil {
return halt(state, err, "Error describe disks")
}
disks := disksResponse.Disks.Disk
if len(disks) == 0 {
return halt(state, err, "Unable to find system disk of instance")
}
createSnapshotRequest := ecs.CreateCreateSnapshotRequest()
createSnapshotRequest.DiskId = disks[0].DiskId
snapshot, err := client.CreateSnapshot(createSnapshotRequest)
if err != nil {
return halt(state, err, "Error creating snapshot")
}
// Create the alicloud snapshot
ui.Say(fmt.Sprintf("Creating snapshot from system disk %s: %s", disks[0].DiskId, snapshot.SnapshotId))
snapshotsResponse, err := client.WaitForSnapshotStatus(config.AlicloudRegion, snapshot.SnapshotId, SnapshotStatusAccomplished, time.Duration(s.WaitSnapshotReadyTimeout)*time.Second)
if err != nil {
_, ok := err.(errors.Error)
if ok {
return halt(state, err, "Error querying created snapshot")
}
return halt(state, err, "Timeout waiting for snapshot to be created")
}
snapshots := snapshotsResponse.(*ecs.DescribeSnapshotsResponse).Snapshots.Snapshot
if len(snapshots) == 0 {
return halt(state, err, "Unable to find created snapshot")
}
s.snapshot = &snapshots[0]
state.Put("alicloudsnapshot", snapshot.SnapshotId)
return multistep.ActionContinue
}
func (s *stepCreateAlicloudSnapshot) Cleanup(state multistep.StateBag) {
if s.snapshot == nil {
return
}
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if !cancelled && !halted {
return
}
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
ui.Say("Deleting the snapshot because of cancellation or error...")
deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
deleteSnapshotRequest.SnapshotId = s.snapshot.SnapshotId
if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
ui.Error(fmt.Sprintf("Error deleting snapshot, it may still be around: %s", err))
return
}
}
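
WaitForSnapshotStatus blocks until the new snapshot reaches SnapshotStatusAccomplished or the configured timeout elapses. A hedged sketch of the polling it presumably wraps, assuming ClientWrapper exposes the SDK's DescribeSnapshots call (the real helper may batch requests or back off differently):

// Sketch only: poll DescribeSnapshots until the snapshot reaches the expected
// status or the deadline passes.
func waitForSnapshotSketch(client *ClientWrapper, regionId, snapshotId, expected string, timeout time.Duration) (*ecs.DescribeSnapshotsResponse, error) {
	deadline := time.Now().Add(timeout)
	for {
		request := ecs.CreateDescribeSnapshotsRequest()
		request.RegionId = regionId
		request.SnapshotIds = fmt.Sprintf("[\"%s\"]", snapshotId)
		response, err := client.DescribeSnapshots(request)
		if err == nil {
			for _, snapshot := range response.Snapshots.Snapshot {
				if snapshot.Status == expected {
					return response, nil
				}
			}
		}
		if time.Now().After(deadline) {
			return response, fmt.Errorf("timeout waiting for snapshot %s to become %s", snapshotId, expected)
		}
		time.Sleep(5 * time.Second) // assumed polling interval
	}
}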

View File

@ -0,0 +1,65 @@
package ecs
import (
"context"
"fmt"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
type stepCreateTags struct {
Tags map[string]string
}
func (s *stepCreateTags) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
imageId := state.Get("alicloudimage").(string)
snapshotIds := state.Get("alicloudsnapshots").([]string)
if len(s.Tags) == 0 {
return multistep.ActionContinue
}
ui.Say(fmt.Sprintf("Adding tags(%s) to image: %s", s.Tags, imageId))
var tags []ecs.AddTagsTag
for key, value := range s.Tags {
var tag ecs.AddTagsTag
tag.Key = key
tag.Value = value
tags = append(tags, tag)
}
addTagsRequest := ecs.CreateAddTagsRequest()
addTagsRequest.RegionId = config.AlicloudRegion
addTagsRequest.ResourceId = imageId
addTagsRequest.ResourceType = TagResourceImage
addTagsRequest.Tag = &tags
if _, err := client.AddTags(addTagsRequest); err != nil {
return halt(state, err, "Error Adding tags to image")
}
for _, snapshotId := range snapshotIds {
ui.Say(fmt.Sprintf("Adding tags(%s) to snapshot: %s", s.Tags, snapshotId))
addTagsRequest := ecs.CreateAddTagsRequest()
addTagsRequest.RegionId = config.AlicloudRegion
addTagsRequest.ResourceId = snapshotId
addTagsRequest.ResourceType = TagResourceSnapshot
addTagsRequest.Tag = &tags
if _, err := client.AddTags(addTagsRequest); err != nil {
return halt(state, err, "Error Adding tags to snapshot")
}
}
return multistep.ActionContinue
}
func (s *stepCreateTags) Cleanup(state multistep.StateBag) {
// Nothing needs to be done; tags are removed when the tagged resources are cleaned up
}
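
The two resource-type constants above map onto the ResourceType values the ECS AddTags API expects. Assumed definitions (the real ones live elsewhere in this package):

// Assumed values for the tag resource types referenced above.
const (
	TagResourceImage    = "image"
	TagResourceSnapshot = "snapshot"
)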

View File

@ -1,63 +1,101 @@
package ecs
import (
"context"
"fmt"
"log"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepDeleteAlicloudImageSnapshots struct {
AlicloudImageForceDetele bool
AlicloudImageForceDeteleSnapshots bool
AlicloudImageForceDelete bool
AlicloudImageForceDeleteSnapshots bool
AlicloudImageName string
AlicloudImageDestinationRegions []string
AlicloudImageDestinationNames []string
}
func (s *stepDeleteAlicloudImageSnapshots) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
ui := state.Get("ui").(packer.Ui)
config := state.Get("config").(Config)
ui.Say("Deleting image snapshots.")
func (s *stepDeleteAlicloudImageSnapshots) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
// Check for force delete
if s.AlicloudImageForceDetele {
images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
RegionId: common.Region(config.AlicloudRegion),
ImageName: s.AlicloudImageName,
})
if len(images) < 1 {
return multistep.ActionContinue
}
for _, image := range images {
if image.ImageOwnerAlias != string(ecs.ImageOwnerSelf) {
log.Printf("You can only delete instances based on customized images %s ", image.ImageId)
continue
}
err = client.DeleteImage(common.Region(config.AlicloudRegion), image.ImageId)
if err != nil {
err := fmt.Errorf("Failed to delete image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
if s.AlicloudImageForceDeteleSnapshots {
for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
if err := client.DeleteSnapshot(diskDevice.SnapshotId); err != nil {
err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
}
if s.AlicloudImageForceDelete {
err := s.deleteImageAndSnapshots(state, s.AlicloudImageName, config.AlicloudRegion)
if err != nil {
return halt(state, err, "")
}
numberOfName := len(s.AlicloudImageDestinationNames)
if numberOfName == 0 {
return multistep.ActionContinue
}
for index, destinationRegion := range s.AlicloudImageDestinationRegions {
if destinationRegion == config.AlicloudRegion {
continue
}
if index < numberOfName {
err = s.deleteImageAndSnapshots(state, s.AlicloudImageDestinationNames[index], destinationRegion)
if err != nil {
return halt(state, err, "")
}
} else {
break
}
}
}
return multistep.ActionContinue
}
func (s *stepDeleteAlicloudImageSnapshots) deleteImageAndSnapshots(state multistep.StateBag, imageName string, region string) error {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
describeImagesRequest := ecs.CreateDescribeImagesRequest()
describeImagesRequest.RegionId = region
describeImagesRequest.ImageName = imageName
describeImagesRequest.Status = ImageStatusQueried
imageResponse, _ := client.DescribeImages(describeImagesRequest)
images := imageResponse.Images.Image
if len(images) < 1 {
return nil
}
ui.Say(fmt.Sprintf("Deleting duplicated image and snapshot in %s: %s", region, imageName))
for _, image := range images {
if image.ImageOwnerAlias != ImageOwnerSelf {
log.Printf("You can not delete non-customized images: %s ", image.ImageId)
continue
}
deleteImageRequest := ecs.CreateDeleteImageRequest()
deleteImageRequest.RegionId = region
deleteImageRequest.ImageId = image.ImageId
if _, err := client.DeleteImage(deleteImageRequest); err != nil {
err := fmt.Errorf("Failed to delete image: %s", err)
return err
}
if s.AlicloudImageForceDeleteSnapshots {
for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
deleteSnapshotRequest.SnapshotId = diskDevice.SnapshotId
if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
return err
}
}
}
}
return nil
}
func (s *stepDeleteAlicloudImageSnapshots) Cleanup(state multistep.StateBag) {
}
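
With the force-delete flag set, the step removes the image with the configured name in the build region and then, index by index, any previously copied images named in AlicloudImageDestinationNames in the destination regions. An illustrative helper showing that pairing (not code from the diff):

// Sketch: pair destination regions with their image names, skipping the source region.
func destinationPairs(sourceRegion string, regions []string, names []string) map[string]string {
	pairs := make(map[string]string)
	for i, region := range regions {
		if region == sourceRegion || i >= len(names) {
			continue
		}
		pairs[region] = names[i]
	}
	return pairs
}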

View File

@ -1,71 +0,0 @@
package ecs
import (
"fmt"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepMountAlicloudDisk struct {
}
func (s *stepMountAlicloudDisk) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
alicloudDiskDevices := config.ECSImagesDiskMappings
if len(config.ECSImagesDiskMappings) == 0 {
return multistep.ActionContinue
}
ui.Say("Mounting disks.")
disks, _, err := client.DescribeDisks(&ecs.DescribeDisksArgs{InstanceId: instance.InstanceId,
RegionId: instance.RegionId})
if err != nil {
err := fmt.Errorf("Error querying disks: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
for _, disk := range disks {
if disk.Status == ecs.DiskStatusAvailable {
if err := client.AttachDisk(&ecs.AttachDiskArgs{DiskId: disk.DiskId,
InstanceId: instance.InstanceId,
Device: getDevice(&disk, alicloudDiskDevices),
}); err != nil {
err := fmt.Errorf("Error mounting disks: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
}
for _, disk := range disks {
if err := client.WaitForDisk(instance.RegionId, disk.DiskId, ecs.DiskStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
err := fmt.Errorf("Timeout waiting for mount: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
ui.Say("Finished mounting disks.")
return multistep.ActionContinue
}
func (s *stepMountAlicloudDisk) Cleanup(state multistep.StateBag) {
}
func getDevice(disk *ecs.DiskItemType, diskDevices []AlicloudDiskDevice) string {
if disk.Device != "" {
return disk.Device
}
for _, alicloudDiskDevice := range diskDevices {
if alicloudDiskDevice.DiskName == disk.DiskName || alicloudDiskDevice.SnapshotId == disk.SourceSnapshotId {
return alicloudDiskDevice.Device
}
}
return ""
}
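The removed getDevice helper prefers the device name the API already reports and only falls back to the user-supplied disk mappings, matching either by disk name or by source snapshot. A test-style sketch of that lookup, with made-up disk names and device paths for illustration:

package ecs

import (
	"testing"

	"github.com/denverdino/aliyungo/ecs"
)

// Hypothetical test illustrating the fallback order in getDevice: an explicit
// device on the disk wins, otherwise the configured mapping is matched by
// disk name or snapshot id.
func TestGetDeviceFallback(t *testing.T) {
	mappings := []AlicloudDiskDevice{
		{DiskName: "data-disk-1", Device: "/dev/xvdb"},
		{SnapshotId: "snap-123", Device: "/dev/xvdc"},
	}

	// Disk already carries a device name: use it as-is.
	withDevice := &ecs.DiskItemType{Device: "/dev/xvda"}
	if got := getDevice(withDevice, mappings); got != "/dev/xvda" {
		t.Fatalf("expected /dev/xvda, got %s", got)
	}

	// No device reported: fall back to the mapping that matches the snapshot.
	fromSnapshot := &ecs.DiskItemType{SourceSnapshotId: "snap-123"}
	if got := getDevice(fromSnapshot, mappings); got != "/dev/xvdc" {
		t.Fatalf("expected /dev/xvdc, got %s", got)
	}
}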

View File

@ -1,12 +1,12 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepPreValidate struct {
@ -14,35 +14,74 @@ type stepPreValidate struct {
ForceDelete bool
}
func (s *stepPreValidate) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
if s.ForceDelete {
ui.Say("Force delete flag found, skipping prevalidating image name.")
return multistep.ActionContinue
func (s *stepPreValidate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
if err := s.validateRegions(state); err != nil {
return halt(state, err, "")
}
client := state.Get("client").(*ecs.Client)
config := state.Get("config").(Config)
ui.Say("Prevalidating image name...")
images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
ImageName: s.AlicloudDestImageName,
RegionId: common.Region(config.AlicloudRegion)})
if err != nil {
err := fmt.Errorf("Error querying alicloud image: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
if len(images) > 0 {
err := fmt.Errorf("Error: name conflicts with an existing alicloud image: %s", images[0].ImageId)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
if err := s.validateDestImageName(state); err != nil {
return halt(state, err, "")
}
return multistep.ActionContinue
}
func (s *stepPreValidate) validateRegions(state multistep.StateBag) error {
ui := state.Get("ui").(packer.Ui)
config := state.Get("config").(*Config)
if config.AlicloudSkipValidation {
ui.Say("Skip region validation flag found, skipping prevalidating source region and copied regions.")
return nil
}
ui.Say("Prevalidating source region and copied regions...")
var errs *packer.MultiError
if err := config.ValidateRegion(config.AlicloudRegion); err != nil {
errs = packer.MultiErrorAppend(errs, err)
}
for _, region := range config.AlicloudImageDestinationRegions {
if err := config.ValidateRegion(region); err != nil {
errs = packer.MultiErrorAppend(errs, err)
}
}
if errs != nil && len(errs.Errors) > 0 {
return errs
}
return nil
}
func (s *stepPreValidate) validateDestImageName(state multistep.StateBag) error {
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ClientWrapper)
config := state.Get("config").(*Config)
if s.ForceDelete {
ui.Say("Force delete flag found, skipping prevalidating image name.")
return nil
}
ui.Say("Prevalidating image name...")
describeImagesRequest := ecs.CreateDescribeImagesRequest()
describeImagesRequest.RegionId = config.AlicloudRegion
describeImagesRequest.ImageName = s.AlicloudDestImageName
describeImagesRequest.Status = ImageStatusQueried
imagesResponse, err := client.DescribeImages(describeImagesRequest)
if err != nil {
return fmt.Errorf("Error querying alicloud image: %s", err)
}
images := imagesResponse.Images.Image
if len(images) > 0 {
return fmt.Errorf("Error: Image Name: '%s' is used by an existing alicloud image: %s", images[0].ImageName, images[0].ImageId)
}
return nil
}
func (s *stepPreValidate) Cleanup(multistep.StateBag) {}

View File

@ -1,71 +1,106 @@
package ecs
import (
"context"
"fmt"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
confighelper "github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type setpRegionCopyAlicloudImage struct {
type stepRegionCopyAlicloudImage struct {
AlicloudImageDestinationRegions []string
AlicloudImageDestinationNames []string
RegionId string
}
func (s *setpRegionCopyAlicloudImage) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepRegionCopyAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
if config.ImageEncrypted != confighelper.TriUnset {
s.AlicloudImageDestinationRegions = append(s.AlicloudImageDestinationRegions, s.RegionId)
s.AlicloudImageDestinationNames = append(s.AlicloudImageDestinationNames, config.AlicloudImageName)
}
if len(s.AlicloudImageDestinationRegions) == 0 {
return multistep.ActionContinue
}
client := state.Get("client").(*ecs.Client)
ui := state.Get("ui").(packer.Ui)
imageId := state.Get("alicloudimage").(string)
alicloudImages := state.Get("alicloudimages").(map[string]string)
region := common.Region(s.RegionId)
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
srcImageId := state.Get("alicloudimage").(string)
alicloudImages := state.Get("alicloudimages").(map[string]string)
numberOfName := len(s.AlicloudImageDestinationNames)
ui.Say(fmt.Sprintf("Coping image %s from %s...", srcImageId, s.RegionId))
for index, destinationRegion := range s.AlicloudImageDestinationRegions {
if destinationRegion == s.RegionId {
if destinationRegion == s.RegionId && config.ImageEncrypted == confighelper.TriUnset {
continue
}
ecsImageName := ""
if numberOfName > 0 && index < numberOfName {
ecsImageName = s.AlicloudImageDestinationNames[index]
}
imageId, err := client.CopyImage(
&ecs.CopyImageArgs{
RegionId: region,
ImageId: imageId,
DestinationRegionId: common.Region(destinationRegion),
DestinationImageName: ecsImageName,
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Error copying images: %s", err))
return multistep.ActionHalt
copyImageRequest := ecs.CreateCopyImageRequest()
copyImageRequest.RegionId = s.RegionId
copyImageRequest.ImageId = srcImageId
copyImageRequest.DestinationRegionId = destinationRegion
copyImageRequest.DestinationImageName = ecsImageName
if config.ImageEncrypted != confighelper.TriUnset {
copyImageRequest.Encrypted = requests.NewBoolean(config.ImageEncrypted.True())
}
alicloudImages[destinationRegion] = imageId
imageResponse, err := client.CopyImage(copyImageRequest)
if err != nil {
return halt(state, err, "Error copying images")
}
alicloudImages[destinationRegion] = imageResponse.ImageId
ui.Message(fmt.Sprintf("Copy image from %s(%s) to %s(%s)", s.RegionId, srcImageId, destinationRegion, imageResponse.ImageId))
}
if config.ImageEncrypted != confighelper.TriUnset {
if _, err := client.WaitForImageStatus(s.RegionId, alicloudImages[s.RegionId], ImageStatusAvailable, time.Duration(ALICLOUD_DEFAULT_LONG_TIMEOUT)*time.Second); err != nil {
return halt(state, err, fmt.Sprintf("Timeout waiting image %s finish copying", alicloudImages[s.RegionId]))
}
}
return multistep.ActionContinue
}
func (s *setpRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) {
func (s *stepRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) {
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if cancelled || halted {
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ecs.Client)
alicloudImages := state.Get("alicloudimages").(map[string]string)
ui.Say(fmt.Sprintf("Stopping copy image because cancellation or error..."))
for copiedRegionId, copiedImageId := range alicloudImages {
if copiedRegionId == s.RegionId {
continue
}
if err := client.CancelCopyImage(common.Region(copiedRegionId), copiedImageId); err != nil {
ui.Say(fmt.Sprintf("Error cancelling copy image: %v", err))
}
if !cancelled && !halted {
return
}
ui := state.Get("ui").(packer.Ui)
ui.Say(fmt.Sprintf("Stopping copy image because cancellation or error..."))
client := state.Get("client").(*ClientWrapper)
alicloudImages := state.Get("alicloudimages").(map[string]string)
srcImageId := state.Get("alicloudimage").(string)
for copiedRegionId, copiedImageId := range alicloudImages {
if copiedImageId == srcImageId {
continue
}
cancelCopyImageRequest := ecs.CreateCancelCopyImageRequest()
cancelCopyImageRequest.RegionId = copiedRegionId
cancelCopyImageRequest.ImageId = copiedImageId
if _, err := client.CancelCopyImage(cancelCopyImageRequest); err != nil {
ui.Error(fmt.Sprintf("Error cancelling copy image: %v", err))
}
}
}
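When the image is encrypted, the copy step above ends by calling the client's WaitForImageStatus until the copied image reaches ImageStatusAvailable. The ClientWrapper's waiter is not shown in this diff; a generic, context-free sketch of the kind of polling such a helper has to do (interval and error text are assumptions, not the real implementation):

package ecs

import (
	"fmt"
	"time"
)

// waitForCondition re-checks a condition until it is met or the timeout
// elapses, roughly what a helper like WaitForImageStatus relies on.
func waitForCondition(check func() (bool, error), timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timeout after %s waiting for condition", timeout)
		}
		time.Sleep(5 * time.Second)
	}
}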

View File

@ -1,35 +1,34 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepRunAlicloudInstance struct {
}
func (s *stepRunAlicloudInstance) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
func (s *stepRunAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
ui := state.Get("ui").(packer.Ui)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
instance := state.Get("instance").(*ecs.Instance)
err := client.StartInstance(instance.InstanceId)
if err != nil {
err := fmt.Errorf("Error starting instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
startInstanceRequest := ecs.CreateStartInstanceRequest()
startInstanceRequest.InstanceId = instance.InstanceId
if _, err := client.StartInstance(startInstanceRequest); err != nil {
return halt(state, err, "Error starting instance")
}
ui.Say("Starting instance.")
err = client.WaitForInstance(instance.InstanceId, ecs.Running, ALICLOUD_DEFAULT_TIMEOUT)
ui.Say(fmt.Sprintf("Starting instance: %s", instance.InstanceId))
_, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusRunning)
if err != nil {
err := fmt.Errorf("Timeout waiting for instance to start: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "Timeout waiting for instance to start")
}
return multistep.ActionContinue
@ -38,19 +37,36 @@ func (s *stepRunAlicloudInstance) Run(state multistep.StateBag) multistep.StepAc
func (s *stepRunAlicloudInstance) Cleanup(state multistep.StateBag) {
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if cancelled || halted {
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ecs.Client)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
instanceAttribute, _ := client.DescribeInstanceAttribute(instance.InstanceId)
if instanceAttribute.Status == ecs.Starting || instanceAttribute.Status == ecs.Running {
if err := client.StopInstance(instance.InstanceId, true); err != nil {
ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
return
}
if err := client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
}
if !cancelled && !halted {
return
}
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ClientWrapper)
instance := state.Get("instance").(*ecs.Instance)
describeInstancesRequest := ecs.CreateDescribeInstancesRequest()
describeInstancesRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instance.InstanceId)
instancesResponse, _ := client.DescribeInstances(describeInstancesRequest)
if len(instancesResponse.Instances.Instance) == 0 {
return
}
instanceAttribute := instancesResponse.Instances.Instance[0]
if instanceAttribute.Status == InstanceStatusStarting || instanceAttribute.Status == InstanceStatusRunning {
stopInstanceRequest := ecs.CreateStopInstanceRequest()
stopInstanceRequest.InstanceId = instance.InstanceId
stopInstanceRequest.ForceStop = requests.NewBoolean(true)
if _, err := client.StopInstance(stopInstanceRequest); err != nil {
ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
return
}
_, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusStopped)
if err != nil {
ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
}
}
}
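The cleanup above passes the instance ID to DescribeInstances as a JSON array encoded into a single string, which is why it uses fmt.Sprintf("[\"%s\"]", id). For more than one ID, encoding/json produces the same shape without manual escaping; a small illustrative helper (the function name is an assumption, not part of the builder):

package ecs

import (
	"encoding/json"
	"fmt"
)

// instanceIdsParam encodes a list of instance IDs the way the
// DescribeInstances API expects them: a JSON array inside one string field.
func instanceIdsParam(ids []string) (string, error) {
	encoded, err := json.Marshal(ids)
	if err != nil {
		return "", fmt.Errorf("encoding instance ids: %s", err)
	}
	return string(encoded), nil
}

For example, instanceIdsParam([]string{"i-abc", "i-def"}) yields ["i-abc","i-def"].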

View File

@ -1,60 +1,60 @@
package ecs
import (
"context"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type setpShareAlicloudImage struct {
type stepShareAlicloudImage struct {
AlicloudImageShareAccounts []string
AlicloudImageUNShareAccounts []string
RegionId string
}
func (s *setpShareAlicloudImage) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
ui := state.Get("ui").(packer.Ui)
func (s *stepShareAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
alicloudImages := state.Get("alicloudimages").(map[string]string)
for copiedRegion, copiedImageId := range alicloudImages {
err := client.ModifyImageSharePermission(
&ecs.ModifyImageSharePermissionArgs{
RegionId: common.Region(copiedRegion),
ImageId: copiedImageId,
AddAccount: s.AlicloudImageShareAccounts,
RemoveAccount: s.AlicloudImageUNShareAccounts,
})
if err != nil {
state.Put("error", err)
ui.Say(fmt.Sprintf("Failed modifying image share permissions: %s", err))
return multistep.ActionHalt
for regionId, imageId := range alicloudImages {
modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
modifyImageShareRequest.RegionId = regionId
modifyImageShareRequest.ImageId = imageId
modifyImageShareRequest.AddAccount = &s.AlicloudImageShareAccounts
modifyImageShareRequest.RemoveAccount = &s.AlicloudImageUNShareAccounts
if _, err := client.ModifyImageSharePermission(modifyImageShareRequest); err != nil {
return halt(state, err, "Failed modifying image share permissions")
}
}
return multistep.ActionContinue
}
func (s *setpShareAlicloudImage) Cleanup(state multistep.StateBag) {
func (s *stepShareAlicloudImage) Cleanup(state multistep.StateBag) {
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if cancelled || halted {
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ecs.Client)
alicloudImages := state.Get("alicloudimages").(map[string]string)
ui.Say("Restoring image share permission because cancellations or error...")
for copiedRegion, copiedImageId := range alicloudImages {
err := client.ModifyImageSharePermission(
&ecs.ModifyImageSharePermissionArgs{
RegionId: common.Region(copiedRegion),
ImageId: copiedImageId,
AddAccount: s.AlicloudImageUNShareAccounts,
RemoveAccount: s.AlicloudImageShareAccounts,
})
if err != nil {
ui.Say(fmt.Sprintf("Restoring image share permission failed: %s", err))
}
if !cancelled && !halted {
return
}
ui := state.Get("ui").(packer.Ui)
client := state.Get("client").(*ClientWrapper)
alicloudImages := state.Get("alicloudimages").(map[string]string)
ui.Say("Restoring image share permission because cancellations or error...")
for regionId, imageId := range alicloudImages {
modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
modifyImageShareRequest.RegionId = regionId
modifyImageShareRequest.ImageId = imageId
modifyImageShareRequest.AddAccount = &s.AlicloudImageUNShareAccounts
modifyImageShareRequest.RemoveAccount = &s.AlicloudImageShareAccounts
if _, err := client.ModifyImageSharePermission(modifyImageShareRequest); err != nil {
ui.Say(fmt.Sprintf("Restoring image share permission failed: %s", err))
}
}
}

View File

@ -1,38 +1,43 @@
package ecs
import (
"context"
"fmt"
"strconv"
"github.com/denverdino/aliyungo/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type stepStopAlicloudInstance struct {
ForceStop bool
ForceStop bool
DisableStop bool
}
func (s *stepStopAlicloudInstance) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
instance := state.Get("instance").(*ecs.InstanceAttributesType)
func (s *stepStopAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ClientWrapper)
instance := state.Get("instance").(*ecs.Instance)
ui := state.Get("ui").(packer.Ui)
err := client.StopInstance(instance.InstanceId, s.ForceStop)
if err != nil {
if err != nil {
err := fmt.Errorf("Error stopping alicloud instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
if !s.DisableStop {
ui.Say(fmt.Sprintf("Stopping instance: %s", instance.InstanceId))
stopInstanceRequest := ecs.CreateStopInstanceRequest()
stopInstanceRequest.InstanceId = instance.InstanceId
stopInstanceRequest.ForceStop = requests.Boolean(strconv.FormatBool(s.ForceStop))
if _, err := client.StopInstance(stopInstanceRequest); err != nil {
return halt(state, err, "Error stopping alicloud instance")
}
}
err = client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
ui.Say(fmt.Sprintf("Waiting instance stopped: %s", instance.InstanceId))
_, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusStopped)
if err != nil {
err := fmt.Errorf("Error waiting for alicloud instance to stop: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
return halt(state, err, "Error waiting for alicloud instance to stop")
}
return multistep.ActionContinue
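Two different spellings of the SDK boolean appear in this section: requests.NewBoolean(config.ImageEncrypted.True()) in the copy step and requests.Boolean(strconv.FormatBool(s.ForceStop)) here. Request booleans in the Alibaba Cloud SDK are string-backed, so both forms produce the same wire value; a tiny sketch of that equivalence (assuming requests.Boolean remains a string type):

package ecs

import (
	"strconv"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
)

// forceStopFlags shows the two equivalent ways of building the ForceStop
// parameter; both return "true" or both return "false".
func forceStopFlags(force bool) (requests.Boolean, requests.Boolean) {
	fromHelper := requests.NewBoolean(force)
	fromString := requests.Boolean(strconv.FormatBool(force))
	return fromHelper, fromString
}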

View File

@ -0,0 +1,69 @@
//go:generate struct-markdown
package chroot
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/template/interpolate"
)
type BlockDevice struct {
awscommon.BlockDevice `mapstructure:",squash"`
// ID, alias or ARN of the KMS key to use for boot volume encryption. This
// only applies to the main region, other regions where the AMI will be
// copied will be encrypted by the default EBS KMS key. For valid formats
// see KmsKeyId in the [AWS API docs -
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html)
// This field is validated by Packer. When using an alias, you will have to
// prefix kms_key_id with alias/.
KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
}
type BlockDevices []BlockDevice
func (bds BlockDevices) BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping {
var blockDevices []*ec2.BlockDeviceMapping
for _, blockDevice := range bds {
blockDevices = append(blockDevices, blockDevice.BuildEC2BlockDeviceMapping())
}
return blockDevices
}
func (blockDevice BlockDevice) BuildEC2BlockDeviceMapping() *ec2.BlockDeviceMapping {
mapping := blockDevice.BlockDevice.BuildEC2BlockDeviceMapping()
if blockDevice.KmsKeyId != "" {
mapping.Ebs.KmsKeyId = aws.String(blockDevice.KmsKeyId)
}
return mapping
}
func (b *BlockDevice) Prepare(ctx *interpolate.Context) error {
err := b.BlockDevice.Prepare(ctx)
if err != nil {
return err
}
// Error if kms_key_id is set but the device is not marked as encrypted
if b.KmsKeyId != "" && !b.Encrypted.True() {
return fmt.Errorf("The device %v must also have `encrypted: "+
"true` when setting a kms_key_id.", b.DeviceName)
}
_, err = interpolate.RenderInterface(&b, ctx)
return err
}
func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
for _, block := range bds {
if err := block.Prepare(ctx); err != nil {
errs = append(errs, err)
}
}
return errs
}
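A hedged, test-style example of the kms_key_id plumbing added above: a device that sets a key must also be marked encrypted, and the key then ends up on the generated EC2 block device mapping. The field names come from the embedded awscommon.BlockDevice, and config.TriTrue assumes Encrypted is the Trilean type from helper/config; the key alias is a placeholder.

package chroot

import (
	"testing"

	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/helper/config"
)

// Illustrative only: the KMS key set on the chroot block device should be
// copied onto the generated EC2 mapping.
func TestKmsKeyIdPropagates(t *testing.T) {
	device := BlockDevice{
		BlockDevice: awscommon.BlockDevice{
			DeviceName: "/dev/xvda",
			VolumeSize: 20,
			Encrypted:  config.TriTrue,
		},
		KmsKeyId: "alias/packer-example",
	}

	mapping := device.BuildEC2BlockDeviceMapping()
	if mapping.Ebs == nil || mapping.Ebs.KmsKeyId == nil {
		t.Fatalf("expected an EBS mapping with a kms key id, got %+v", mapping)
	}
	if *mapping.Ebs.KmsKeyId != "alias/packer-example" {
		t.Fatalf("unexpected kms key id: %s", *mapping.Ebs.KmsKeyId)
	}
}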

View File

@ -1,48 +1,169 @@
// The chroot package is able to create an Amazon AMI without requiring
// the launch of a new instance for every build. It does this by attaching
// and mounting the root volume of another AMI and chrooting into that
// directory. It then creates an AMI from that attached drive.
//go:generate struct-markdown
// The chroot package is able to create an Amazon AMI without requiring the
// launch of a new instance for every build. It does this by attaching and
// mounting the root volume of another AMI and chrooting into that directory.
// It then creates an AMI from that attached drive.
package chroot
import (
"context"
"errors"
"log"
"runtime"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"github.com/mitchellh/multistep"
)
// The unique ID for this builder
const BuilderId = "mitchellh.amazon.chroot"
// Config is the configuration that is chained through the steps and
// settable from the template.
// Config is the configuration that is chained through the steps and settable
// from the template.
type Config struct {
common.PackerConfig `mapstructure:",squash"`
awscommon.AMIBlockDevices `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.AccessConfig `mapstructure:",squash"`
ChrootMounts [][]string `mapstructure:"chroot_mounts"`
CommandWrapper string `mapstructure:"command_wrapper"`
CopyFiles []string `mapstructure:"copy_files"`
DevicePath string `mapstructure:"device_path"`
FromScratch bool `mapstructure:"from_scratch"`
MountOptions []string `mapstructure:"mount_options"`
MountPartition int `mapstructure:"mount_partition"`
MountPath string `mapstructure:"mount_path"`
PostMountCommands []string `mapstructure:"post_mount_commands"`
PreMountCommands []string `mapstructure:"pre_mount_commands"`
RootDeviceName string `mapstructure:"root_device_name"`
RootVolumeSize int64 `mapstructure:"root_volume_size"`
SourceAmi string `mapstructure:"source_ami"`
SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter"`
common.PackerConfig `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.AccessConfig `mapstructure:",squash"`
// Add one or more [block device
// mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
// to the AMI. If this field is populated, and you are building from an
// existing source image, the block device mappings in the source image
// will be overwritten. This means you must have a block device mapping
// entry for your root volume, `root_volume_size` and `root_device_name`.
// See the [BlockDevices](#block-devices-configuration) documentation for
// fields.
AMIMappings BlockDevices `mapstructure:"ami_block_device_mappings" required:"false"`
// This is a list of devices to mount into the chroot environment. This
// configuration parameter requires some additional documentation which is
// in the Chroot Mounts section. Please read that section for more
// information on how to use this.
ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
// How to run shell commands. This defaults to {{.Command}}. This may be
// useful to set if you want to set environment variables or perhaps run
// the command with sudo. This is a configuration template where the
// .Command variable is replaced with the command to be run.
CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
// Paths to files on the running EC2 instance that will be copied into the
// chroot environment prior to provisioning. Defaults to /etc/resolv.conf
// so that DNS lookups work. Pass an empty list to skip copying
// /etc/resolv.conf. You may need to do this if you're building an image
// that uses systemd.
CopyFiles []string `mapstructure:"copy_files" required:"false"`
// The path to the device where the root volume of the source AMI will be
// attached. This defaults to "" (empty string), which forces Packer to
// find an open device automatically.
DevicePath string `mapstructure:"device_path" required:"false"`
// When we call the mount command (by default mount -o device dir), the
// string provided in nvme_device_path will replace device in that command.
// When this option is not set, device in that command will be something
// like /dev/sdf1, mirroring the attached device name. This assumption
// works for most instances but will fail with c5 and m5 instances. In
// order to use the chroot builder with c5 and m5 instances, you must
// manually set nvme_device_path and device_path.
NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
// Build a new volume instead of starting from an existing AMI root volume
// snapshot. Default false. If true, source_ami is no longer used and the
// following options become required: ami_virtualization_type,
// pre_mount_commands and root_volume_size.
FromScratch bool `mapstructure:"from_scratch" required:"false"`
// Options to supply the mount command when mounting devices. Each option
// will be prefixed with -o and supplied to the mount command run by
// Packer. Because this command is run in a shell, user discretion is
// advised. See the manual page for the mount command for valid file
// system specific options.
MountOptions []string `mapstructure:"mount_options" required:"false"`
// The partition number containing the / partition. By default this is the
// first partition of the volume (for example, xvda1), but you can
// designate the entire block device by setting "mount_partition": "0" in
// your config, which will mount xvda instead.
MountPartition string `mapstructure:"mount_partition" required:"false"`
// The path where the volume will be mounted. This is where the chroot
// environment will be. This defaults to
// /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration
// template where the .Device variable is replaced with the name of the
// device where the volume is attached.
MountPath string `mapstructure:"mount_path" required:"false"`
// As pre_mount_commands, but the commands are executed after mounting the
// root device and before the extra mount and copy steps. The device and
// mount path are provided by {{.Device}} and {{.MountPath}}.
PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
// A series of commands to execute after attaching the root volume and
// before mounting the chroot. This is not required unless using
// from_scratch. If so, this should include any partitioning and filesystem
// creation commands. The path to the device is provided by {{.Device}}.
PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
// The root device name. For example, xvda.
RootDeviceName string `mapstructure:"root_device_name" required:"false"`
// The size of the root volume in GB for the chroot environment and the
// resulting AMI. Default size is the snapshot size of the source_ami
// unless from_scratch is true, in which case this field must be defined.
RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
// The type of EBS volume for the chroot environment and resulting AMI. The
// default value is the type of the source_ami, unless from_scratch is
// true, in which case the default value is gp2. You can only specify io1
// if building based on top of a source_ami which is also io1.
RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
// The source AMI whose root volume will be copied and provisioned on the
// currently running instance. This must be an EBS-backed AMI with a root
// volume snapshot that you have access to. Note: this is not used when
// from_scratch is set to true.
SourceAmi string `mapstructure:"source_ami" required:"true"`
// Filters used to populate the source_ami field. Example:
//
//
// ``` json
// {
// "source_ami_filter": {
// "filters": {
// "virtualization-type": "hvm",
// "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
// "root-device-type": "ebs"
// },
// "owners": ["099720109477"],
// "most_recent": true
// }
// }
// ```
//
// This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
// This will fail unless *exactly* one AMI is returned. In the above example,
// `most_recent` will cause this to succeed by selecting the newest image.
//
// - `filters` (map of strings) - filters used to select a `source_ami`.
// NOTE: This will fail unless *exactly* one AMI is returned. Any filter
// described in the docs for
// [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
// is valid.
//
// - `owners` (array of strings) - Filters the images by their owner. You
// may specify one or more AWS account IDs, "self" (which will use the
// account whose credentials you are using to run Packer), or an AWS owner
// alias: for example, "amazon", "aws-marketplace", or "microsoft". This
// option is required for security reasons.
//
// - `most_recent` (boolean) - Selects the newest created image when true.
// This is most useful for selecting a daily distro build.
//
// You may set this in place of `source_ami` or in conjunction with it. If you
// set this in conjunction with `source_ami`, the `source_ami` will be added
// to the filter. The provided `source_ami` must meet all of the filtering
// criteria provided in `source_ami_filter`; this pins the AMI returned by the
// filter, but will cause Packer to fail if the `source_ami` does not exist.
SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
// Tags to apply to the volumes that are *created*. This is a [template
// engine](/docs/templates/engine.html); see [Build template
// data](#build-template-data) for more information.
RootVolumeTags awscommon.TagMap `mapstructure:"root_volume_tags" required:"false"`
// What architecture to use when registering the final AMI; valid options
// are "x86_64" or "arm64". Defaults to "x86_64".
Architecture string `mapstructure:"ami_architecture" required:"false"`
ctx interpolate.Context
}
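Several of the fields documented above (command_wrapper, mount_path, pre_mount_commands, post_mount_commands) are configuration templates rendered with Packer's interpolate package, with .Command or .Device supplied as data; the wrappedCommand closure later in this file does exactly that. A standalone sketch of the mechanism, where the sudo wrapper value is only an example:

package chroot

import (
	"fmt"

	"github.com/hashicorp/packer/template/interpolate"
)

// renderWrapper shows how a command_wrapper template such as
// "sudo -E {{.Command}}" gets its .Command variable filled in. Sketch only.
func renderWrapper(wrapper, command string) (string, error) {
	ictx := interpolate.Context{
		Data: struct{ Command string }{Command: command},
	}
	rendered, err := interpolate.Render(wrapper, &ictx)
	if err != nil {
		return "", fmt.Errorf("error rendering command wrapper: %s", err)
	}
	return rendered, nil
}

For example, renderWrapper("sudo -E {{.Command}}", "mount /dev/xvdf1 /mnt") returns "sudo -E mount /dev/xvdf1 /mnt".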
@ -66,6 +187,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
"ami_description",
"snapshot_tags",
"tags",
"root_volume_tags",
"command_wrapper",
"post_mount_commands",
"pre_mount_commands",
@ -77,6 +199,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
return nil, err
}
if b.config.Architecture == "" {
b.config.Architecture = "x86_64"
}
if b.config.PackerConfig.PackerForce {
b.config.AMIForceDeregister = true
}
@ -98,7 +224,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
// set default copy file if we're not giving our own
if b.config.CopyFiles == nil {
b.config.CopyFiles = make([]string, 0)
if !b.config.FromScratch {
b.config.CopyFiles = []string{"/etc/resolv.conf"}
}
@ -112,8 +237,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
}
if b.config.MountPartition == 0 {
b.config.MountPartition = 1
if b.config.MountPartition == "" {
b.config.MountPartition = "1"
}
// Accumulate any errors or warnings
@ -161,23 +286,42 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
errs = packer.MultiErrorAppend(
errs, errors.New("source_ami or source_ami_filter is required."))
}
if len(b.config.AMIMappings) != 0 {
warns = append(warns, "ami_block_device_mappings are unused when from_scratch is false")
if len(b.config.AMIMappings) > 0 && b.config.RootDeviceName != "" {
if b.config.RootVolumeSize == 0 {
// Although the device size can be specified in the block device
// mapping, it's easier to be explicit here.
errs = packer.MultiErrorAppend(
errs, errors.New("root_volume_size is required if ami_block_device_mappings is specified"))
}
warns = append(warns, "ami_block_device_mappings from source image will be completely overwritten")
} else if len(b.config.AMIMappings) > 0 {
errs = packer.MultiErrorAppend(
errs, errors.New("If ami_block_device_mappings is specified, root_device_name must be specified"))
} else if b.config.RootDeviceName != "" {
errs = packer.MultiErrorAppend(
errs, errors.New("If root_device_name is specified, ami_block_device_mappings must be specified"))
}
if b.config.RootDeviceName != "" {
warns = append(warns, "root_device_name is unused when from_scratch is false")
}
valid := false
for _, validArch := range []string{"x86_64", "arm64"} {
if validArch == b.config.Architecture {
valid = true
break
}
}
if !valid {
errs = packer.MultiErrorAppend(errs, errors.New(`The only valid ami_architecture values are "x86_64" and "arm64"`))
}
if errs != nil && len(errs.Errors) > 0 {
return warns, errs
}
log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey, b.config.Token))
packer.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
return warns, nil
}
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
if runtime.GOOS != "linux" {
return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
}
@ -189,14 +333,16 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
ec2conn := ec2.New(session)
wrappedCommand := func(command string) (string, error) {
ctx := b.config.ctx
ctx.Data = &wrappedCommandTemplate{Command: command}
return interpolate.Render(b.config.CommandWrapper, &ctx)
ictx := b.config.ctx
ictx.Data = &wrappedCommandTemplate{Command: command}
return interpolate.Render(b.config.CommandWrapper, &ictx)
}
// Setup the state bag and initial state for the steps
state := new(multistep.BasicStateBag)
state.Put("config", &b.config)
state.Put("access_config", &b.config.AccessConfig)
state.Put("ami_config", &b.config.AMIConfig)
state.Put("ec2", ec2conn)
state.Put("awsSession", session)
state.Put("hook", hook)
@ -219,6 +365,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
EnableAMIENASupport: b.config.AMIENASupport,
AmiFilters: b.config.SourceAmiFilter,
AMIVirtType: b.config.AMIVirtType,
},
&StepCheckRootDevice{},
)
@ -228,7 +375,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
&StepFlock{},
&StepPrepareDevice{},
&StepCreateVolume{
RootVolumeType: b.config.RootVolumeType,
RootVolumeSize: b.config.RootVolumeSize,
RootVolumeTags: b.config.RootVolumeTags,
Ctx: b.config.ctx,
},
&StepAttachVolume{},
&StepEarlyUnflock{},
@ -259,18 +409,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
EnableAMIENASupport: b.config.AMIENASupport,
},
&awscommon.StepCreateEncryptedAMICopy{
KeyID: b.config.AMIKmsKeyId,
EncryptBootVolume: b.config.AMIEncryptBootVolume,
Name: b.config.AMIName,
AMIMappings: b.config.AMIBlockDevices.AMIMappings,
},
&awscommon.StepAMIRegionCopy{
AccessConfig: &b.config.AccessConfig,
Regions: b.config.AMIRegions,
AMIKmsKeyId: b.config.AMIKmsKeyId,
RegionKeyIds: b.config.AMIRegionKMSKeyIDs,
EncryptBootVolume: b.config.AMIEncryptBootVolume,
Name: b.config.AMIName,
OriginalRegion: *ec2conn.Config.Region,
},
&awscommon.StepModifyAMIAttributes{
Description: b.config.AMIDescription,
@ -290,7 +436,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
// Run!
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
b.runner.Run(state)
b.runner.Run(ctx, state)
// If there was an error, return that
if rawErr, ok := state.GetOk("error"); ok {
@ -311,10 +457,3 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return artifact, nil
}
func (b *Builder) Cancel() {
if b.runner != nil {
log.Println("Cancelling the step runner...")
b.runner.Cancel()
}
}

View File

@ -11,6 +11,8 @@ func testConfig() map[string]interface{} {
"ami_name": "foo",
"source_ami": "foo",
"region": "us-east-1",
// region validation logic is checked in ami_config_test
"skip_region_validation": true,
}
}
@ -28,6 +30,7 @@ func TestBuilderPrepare_AMIName(t *testing.T) {
// Test good
config["ami_name"] = "foo"
config["skip_region_validation"] = true
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
@ -71,11 +74,16 @@ func TestBuilderPrepare_ChrootMounts(t *testing.T) {
if err != nil {
t.Errorf("err: %s", err)
}
}
func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) {
b := &Builder{}
config := testConfig()
config["chroot_mounts"] = [][]string{
{"bad"},
}
warnings, err = b.Prepare(config)
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
@ -135,9 +143,14 @@ func TestBuilderPrepare_CopyFiles(t *testing.T) {
if len(b.config.CopyFiles) != 1 && b.config.CopyFiles[0] != "/etc/resolv.conf" {
t.Errorf("Was expecting default value for copy_files.")
}
}
func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) {
b := &Builder{}
config := testConfig()
config["copy_files"] = []string{}
warnings, err = b.Prepare(config)
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
@ -146,6 +159,53 @@ func TestBuilderPrepare_CopyFiles(t *testing.T) {
}
if len(b.config.CopyFiles) > 0 {
t.Errorf("Was expecting no default value for copy_files.")
t.Errorf("Was expecting no default value for copy_files. Found %v",
b.config.CopyFiles)
}
}
func TestBuilderPrepare_RootDeviceNameAndAMIMappings(t *testing.T) {
var b Builder
config := testConfig()
config["root_device_name"] = "/dev/sda"
config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
config["root_volume_size"] = 15
warnings, err := b.Prepare(config)
if len(warnings) == 0 {
t.Fatal("Missing warning, stating block device mappings will be overwritten")
} else if len(warnings) > 1 {
t.Fatalf("excessive warnings: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
}
func TestBuilderPrepare_AMIMappingsNoRootDeviceName(t *testing.T) {
var b Builder
config := testConfig()
config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err == nil {
t.Fatalf("should have error")
}
}
func TestBuilderPrepare_RootDeviceNameNoAMIMappings(t *testing.T) {
var b Builder
config := testConfig()
config["root_device_name"] = "/dev/sda"
warnings, err := b.Prepare(config)
if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err == nil {
t.Fatalf("should have error")
}
}

View File

@ -1,7 +1,7 @@
package chroot
import (
"github.com/mitchellh/multistep"
"github.com/hashicorp/packer/helper/multistep"
)
// Cleanup is an interface that some steps implement for early cleanup.

View File

@ -2,17 +2,19 @@ package chroot
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/packer/tmp"
)
// Communicator is a special communicator that works by executing
@ -22,9 +24,11 @@ type Communicator struct {
CmdWrapper CommandWrapper
}
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
func (c *Communicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
// need extra escapes for the command since we're wrapping it in quotes
cmd.Command = strconv.Quote(cmd.Command)
command, err := c.CmdWrapper(
fmt.Sprintf("chroot %s /bin/sh -c \"%s\"", c.Chroot, cmd.Command))
fmt.Sprintf("chroot %s /bin/sh -c %s", c.Chroot, cmd.Command))
if err != nil {
return err
}
@ -64,7 +68,7 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
dst = filepath.Join(c.Chroot, dst)
log.Printf("Uploading to chroot dir: %s", dst)
tf, err := ioutil.TempFile("", "packer-amazon-chroot")
tf, err := tmp.File("packer-amazon-chroot")
if err != nil {
return fmt.Errorf("Error preparing shell script: %s", err)
}
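The Start change above quotes the remote command with strconv.Quote before substituting it into chroot ... /bin/sh -c %s, so embedded quotes survive the extra shell level. A tiny illustration of what that quoting produces; the paths and command are placeholders:

package chroot

import (
	"fmt"
	"strconv"
)

// exampleChrootCommand shows the string the communicator ends up passing to
// the command wrapper after strconv.Quote has escaped the inner quotes.
func exampleChrootCommand() string {
	inner := `echo "hello from the chroot" > /tmp/marker`
	quoted := strconv.Quote(inner)
	// Result:
	// chroot /mnt/packer-amazon-chroot-volumes/xvdf /bin/sh -c "echo \"hello from the chroot\" > /tmp/marker"
	return fmt.Sprintf("chroot /mnt/packer-amazon-chroot-volumes/xvdf /bin/sh -c %s", quoted)
}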

View File

@ -1,8 +1,9 @@
package chroot
import (
"github.com/hashicorp/packer/packer"
"testing"
"github.com/hashicorp/packer/packer"
)
func TestCommunicator_ImplementsCommunicator(t *testing.T) {

View File

@ -0,0 +1,10 @@
package chroot
import "testing"
func TestDevicePrefixMatch(t *testing.T) {
/*
if devicePrefixMatch("nvme0n1") != "" {
}
*/
}

View File

@ -1,16 +1,18 @@
package chroot
import (
"context"
"fmt"
sl "github.com/hashicorp/packer/common/shell-local"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/post-processor/shell-local"
"github.com/hashicorp/packer/template/interpolate"
)
func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx interpolate.Context, ui packer.Ui) error {
func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ictx interpolate.Context, ui packer.Ui) error {
ctx := context.TODO()
for _, rawCmd := range commands {
intCmd, err := interpolate.Render(rawCmd, &ctx)
intCmd, err := interpolate.Render(rawCmd, &ictx)
if err != nil {
return fmt.Errorf("Error interpolating: %s", err)
}
@ -21,15 +23,17 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx inte
}
ui.Say(fmt.Sprintf("Executing command: %s", command))
comm := &shell_local.Communicator{}
comm := &sl.Communicator{
ExecuteCommand: []string{"sh", "-c", command},
}
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return fmt.Errorf("Error executing command: %s", err)
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Received non-zero exit code %d from command: %s",
cmd.ExitStatus,
cmd.ExitStatus(),
command)
}
}

View File

@ -1,15 +1,15 @@
package chroot
import (
"errors"
"context"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepAttachVolume attaches the previously created volume to an
@ -23,7 +23,7 @@ type StepAttachVolume struct {
volumeId string
}
func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepAttachVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(*ec2.EC2)
device := state.Get("device").(string)
instance := state.Get("instance").(*ec2.Instance)
@ -51,35 +51,7 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction {
s.volumeId = volumeId
// Wait for the volume to become attached
stateChange := awscommon.StateChangeConf{
Pending: []string{"attaching"},
StepState: state,
Target: "attached",
Refresh: func() (interface{}, string, error) {
attempts := 0
for attempts < 30 {
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeId}})
if err != nil {
return nil, "", err
}
if len(resp.Volumes[0].Attachments) > 0 {
a := resp.Volumes[0].Attachments[0]
return a, *a.State, nil
}
// When Attachment on volume is not present sleep for 2s and retry
attempts += 1
ui.Say(fmt.Sprintf(
"Volume %s show no attachments. Attempt %d/30. Sleeping for 2s and will retry.",
volumeId, attempts))
time.Sleep(2 * time.Second)
}
// Attachment on volume is not present after all attempts
return nil, "", errors.New("No attachments on volume.")
},
}
_, err = awscommon.WaitForState(&stateChange)
err = awscommon.WaitUntilVolumeAttached(ctx, ec2conn, s.volumeId)
if err != nil {
err := fmt.Errorf("Error waiting for volume: %s", err)
state.Put("error", err)
@ -115,26 +87,7 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
s.attached = false
// Wait for the volume to detach
stateChange := awscommon.StateChangeConf{
Pending: []string{"attaching", "attached", "detaching"},
StepState: state,
Target: "detached",
Refresh: func() (interface{}, string, error) {
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}})
if err != nil {
return nil, "", err
}
v := resp.Volumes[0]
if len(v.Attachments) > 0 {
return v, *v.Attachments[0].State, nil
} else {
return v, "detached", nil
}
},
}
_, err = awscommon.WaitForState(&stateChange)
err = awscommon.WaitUntilVolumeDetached(aws.BackgroundContext(), ec2conn, s.volumeId)
if err != nil {
return fmt.Errorf("Error waiting for volume: %s", err)
}
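The hand-rolled StateChangeConf polling removed above is replaced by the awscommon.WaitUntilVolumeAttached and WaitUntilVolumeDetached helpers, whose implementation is not part of this diff. For reference, a context-aware loop equivalent to the removed attach check might look like this; the retry count and sleep mirror the old code, not the helper itself:

package chroot

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// waitUntilAttachedSketch polls DescribeVolumes until the first attachment
// reports the "attached" state, roughly what the removed Refresh func did.
func waitUntilAttachedSketch(ctx context.Context, conn *ec2.EC2, volumeId string) error {
	for attempt := 0; attempt < 30; attempt++ {
		resp, err := conn.DescribeVolumesWithContext(ctx, &ec2.DescribeVolumesInput{
			VolumeIds: []*string{aws.String(volumeId)},
		})
		if err != nil {
			return err
		}
		if len(resp.Volumes) > 0 && len(resp.Volumes[0].Attachments) > 0 {
			if aws.StringValue(resp.Volumes[0].Attachments[0].State) == "attached" {
				return nil
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
	return errors.New("no attached volume after 30 attempts")
}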

View File

@ -1,17 +1,18 @@
package chroot
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
type StepCheckRootDevice struct{}
func (s *StepCheckRootDevice) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepCheckRootDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
image := state.Get("source_image").(*ec2.Image)
ui := state.Get("ui").(packer.Ui)

View File

@ -1,17 +1,18 @@
package chroot
import (
"context"
"log"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepChrootProvision provisions the instance within a chroot.
type StepChrootProvision struct {
}
func (s *StepChrootProvision) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepChrootProvision) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
hook := state.Get("hook").(packer.Hook)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)
@ -25,7 +26,7 @@ func (s *StepChrootProvision) Run(state multistep.StateBag) multistep.StepAction
// Provision
log.Println("Running the provision hook")
if err := hook.Run(packer.HookProvision, ui, comm, nil); err != nil {
if err := hook.Run(ctx, packer.HookProvision, ui, comm, nil); err != nil {
state.Put("error", err)
return multistep.ActionHalt
}

View File

@ -2,11 +2,13 @@ package chroot
import (
"bytes"
"context"
"fmt"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"log"
"path/filepath"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
// StepCopyFiles copies some files from the host into the chroot environment.
@ -18,7 +20,7 @@ type StepCopyFiles struct {
files []string
}
func (s *StepCopyFiles) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepCopyFiles) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)

View File

@ -1,14 +1,17 @@
package chroot
import (
"context"
"errors"
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"github.com/hashicorp/packer/template/interpolate"
)
// StepCreateVolume creates a new volume from the snapshot of the root
@ -19,21 +22,54 @@ import (
type StepCreateVolume struct {
volumeId string
RootVolumeSize int64
RootVolumeType string
RootVolumeTags awscommon.TagMap
Ctx interpolate.Context
}
func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepCreateVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
ec2conn := state.Get("ec2").(*ec2.EC2)
instance := state.Get("instance").(*ec2.Instance)
ui := state.Get("ui").(packer.Ui)
volTags, err := s.RootVolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
if err != nil {
err := fmt.Errorf("Error tagging volumes: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Collect tags for tagging on resource creation
var tagSpecs []*ec2.TagSpecification
if len(volTags) > 0 {
runVolTags := &ec2.TagSpecification{
ResourceType: aws.String("volume"),
Tags: volTags,
}
tagSpecs = append(tagSpecs, runVolTags)
}
var createVolume *ec2.CreateVolumeInput
if config.FromScratch {
rootVolumeType := ec2.VolumeTypeGp2
if s.RootVolumeType == "io1" {
err := errors.New("Cannot use io1 volume when building from scratch")
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
} else if s.RootVolumeType != "" {
rootVolumeType = s.RootVolumeType
}
createVolume = &ec2.CreateVolumeInput{
AvailabilityZone: instance.Placement.AvailabilityZone,
Size: aws.Int64(s.RootVolumeSize),
VolumeType: aws.String(ec2.VolumeTypeGp2),
VolumeType: aws.String(rootVolumeType),
}
} else {
// Determine the root device snapshot
image := state.Get("source_image").(*ec2.Image)
@ -46,28 +82,19 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
}
}
if rootDevice == nil {
err := fmt.Errorf("Couldn't find root device!")
ui.Say("Creating the root volume...")
createVolume, err = s.buildCreateVolumeInput(*instance.Placement.AvailabilityZone, rootDevice)
if err != nil {
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
ui.Say("Creating the root volume...")
vs := *rootDevice.Ebs.VolumeSize
if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
vs = s.RootVolumeSize
}
createVolume = &ec2.CreateVolumeInput{
AvailabilityZone: instance.Placement.AvailabilityZone,
Size: aws.Int64(vs),
SnapshotId: rootDevice.Ebs.SnapshotId,
VolumeType: rootDevice.Ebs.VolumeType,
Iops: rootDevice.Ebs.Iops,
}
}
if len(tagSpecs) > 0 {
createVolume.SetTagSpecifications(tagSpecs)
volTags.Report(ui)
}
log.Printf("Create args: %+v", createVolume)
createVolumeResp, err := ec2conn.CreateVolume(createVolume)
@ -83,22 +110,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
log.Printf("Volume ID: %s", s.volumeId)
// Wait for the volume to become ready
stateChange := awscommon.StateChangeConf{
Pending: []string{"creating"},
StepState: state,
Target: "available",
Refresh: func() (interface{}, string, error) {
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}})
if err != nil {
return nil, "", err
}
v := resp.Volumes[0]
return v, *v.State, nil
},
}
_, err = awscommon.WaitForState(&stateChange)
err = awscommon.WaitUntilVolumeAvailable(ctx, ec2conn, s.volumeId)
if err != nil {
err := fmt.Errorf("Error waiting for volume: %s", err)
state.Put("error", err)
@ -124,3 +136,33 @@ func (s *StepCreateVolume) Cleanup(state multistep.StateBag) {
ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err))
}
}
func (s *StepCreateVolume) buildCreateVolumeInput(az string, rootDevice *ec2.BlockDeviceMapping) (*ec2.CreateVolumeInput, error) {
if rootDevice == nil {
return nil, fmt.Errorf("Couldn't find root device!")
}
createVolumeInput := &ec2.CreateVolumeInput{
AvailabilityZone: aws.String(az),
Size: rootDevice.Ebs.VolumeSize,
SnapshotId: rootDevice.Ebs.SnapshotId,
VolumeType: rootDevice.Ebs.VolumeType,
Iops: rootDevice.Ebs.Iops,
}
if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
createVolumeInput.Size = aws.Int64(s.RootVolumeSize)
}
if s.RootVolumeType == "" || s.RootVolumeType == *rootDevice.Ebs.VolumeType {
return createVolumeInput, nil
}
if s.RootVolumeType == "io1" {
return nil, fmt.Errorf("Root volume type cannot be io1, because existing root volume type was %s", *rootDevice.Ebs.VolumeType)
}
createVolumeInput.VolumeType = aws.String(s.RootVolumeType)
// volume types other than io1 cannot set IOPS
createVolumeInput.Iops = nil
return createVolumeInput, nil
}

View File

@ -0,0 +1,74 @@
package chroot
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/stretchr/testify/assert"
)
func buildTestRootDevice() *ec2.BlockDeviceMapping {
return &ec2.BlockDeviceMapping{
Ebs: &ec2.EbsBlockDevice{
VolumeSize: aws.Int64(10),
SnapshotId: aws.String("snap-1234"),
VolumeType: aws.String("gp2"),
},
}
}
func TestCreateVolume_Default(t *testing.T) {
stepCreateVolume := new(StepCreateVolume)
_, err := stepCreateVolume.buildCreateVolumeInput("test-az", buildTestRootDevice())
assert.NoError(t, err)
}
func TestCreateVolume_Shrink(t *testing.T) {
stepCreateVolume := StepCreateVolume{RootVolumeSize: 1}
testRootDevice := buildTestRootDevice()
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
assert.NoError(t, err)
// Ensure that the new value is equal to the size of the old root device
assert.Equal(t, *ret.Size, *testRootDevice.Ebs.VolumeSize)
}
func TestCreateVolume_Expand(t *testing.T) {
stepCreateVolume := StepCreateVolume{RootVolumeSize: 25}
testRootDevice := buildTestRootDevice()
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
assert.NoError(t, err)
// Ensure that the new value is equal to the size of the value passed in
assert.Equal(t, *ret.Size, stepCreateVolume.RootVolumeSize)
}
func TestCreateVolume_io1_to_io1(t *testing.T) {
stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
testRootDevice := buildTestRootDevice()
testRootDevice.Ebs.VolumeType = aws.String("io1")
testRootDevice.Ebs.Iops = aws.Int64(1000)
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
assert.NoError(t, err)
assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
assert.Equal(t, *ret.Iops, *testRootDevice.Ebs.Iops)
}
func TestCreateVolume_io1_to_gp2(t *testing.T) {
stepCreateVolume := StepCreateVolume{RootVolumeType: "gp2"}
testRootDevice := buildTestRootDevice()
testRootDevice.Ebs.VolumeType = aws.String("io1")
testRootDevice.Ebs.Iops = aws.Int64(1000)
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
assert.NoError(t, err)
assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
assert.Nil(t, ret.Iops)
}
func TestCreateVolume_gp2_to_io1(t *testing.T) {
stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
testRootDevice := buildTestRootDevice()
_, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
assert.Error(t, err)
}

View File

@ -1,17 +1,19 @@
package chroot
import (
"context"
"fmt"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"log"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
// StepEarlyCleanup performs some of the cleanup steps early in order to
// prepare for snapshotting and creating an AMI.
type StepEarlyCleanup struct{}
func (s *StepEarlyCleanup) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepEarlyCleanup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
cleanupKeys := []string{
"copy_files_cleanup",

View File

@ -1,16 +1,18 @@
package chroot
import (
"context"
"fmt"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"log"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
// StepEarlyUnflock unlocks the flock.
type StepEarlyUnflock struct{}
func (s *StepEarlyUnflock) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepEarlyUnflock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
cleanup := state.Get("flock_cleanup").(Cleanup)
ui := state.Get("ui").(packer.Ui)

View File

@ -1,12 +1,14 @@
package chroot
import (
"context"
"fmt"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"log"
"os"
"path/filepath"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
// StepFlock acquires a file lock so that only one Packer chroot build modifies the host at a time.
@ -17,7 +19,7 @@ type StepFlock struct {
fh *os.File
}
func (s *StepFlock) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepFlock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
lockfile := "/var/lock/packer-chroot/lock"

View File

@ -1,20 +1,21 @@
package chroot
import (
"context"
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepInstanceInfo verifies that this builder is running on an EC2 instance.
type StepInstanceInfo struct{}
func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(*ec2.EC2)
session := state.Get("awsSession").(*session.Session)
ui := state.Get("ui").(packer.Ui)

View File

@ -2,6 +2,7 @@ package chroot
import (
"bytes"
"context"
"fmt"
"log"
"os"
@ -9,9 +10,9 @@ import (
"strings"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"github.com/mitchellh/multistep"
)
type mountPathData struct {
@ -25,19 +26,23 @@ type mountPathData struct {
// mount_device_cleanup CleanupFunc - To perform early cleanup
type StepMountDevice struct {
MountOptions []string
MountPartition int
MountPartition string
mountPath string
}
func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepMountDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)
device := state.Get("device").(string)
if config.NVMEDevicePath != "" {
// customizable device path for mounting NVME block devices on c5 and m5 HVM
device = config.NVMEDevicePath
}
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
var virtualizationType string
if config.FromScratch {
if config.FromScratch || config.AMIVirtType != "" {
virtualizationType = config.AMIVirtType
} else {
image := state.Get("source_image").(*ec2.Image)
@ -45,9 +50,10 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
log.Printf("Source image virtualization type is: %s", virtualizationType)
}
ctx := config.ctx
ctx.Data = &mountPathData{Device: filepath.Base(device)}
mountPath, err := interpolate.Render(config.MountPath, &ctx)
ictx := config.ctx
ictx.Data = &mountPathData{Device: filepath.Base(device)}
mountPath, err := interpolate.Render(config.MountPath, &ictx)
if err != nil {
err := fmt.Errorf("Error preparing mount directory: %s", err)
@ -74,8 +80,9 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
}
deviceMount := device
if virtualizationType == "hvm" {
deviceMount = fmt.Sprintf("%s%d", device, s.MountPartition)
if virtualizationType == "hvm" && s.MountPartition != "0" {
deviceMount = fmt.Sprintf("%s%s", device, s.MountPartition)
}
state.Put("deviceMount", deviceMount)
@ -96,7 +103,7 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
ui.Error(err.Error())
return multistep.ActionHalt
}
log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
cmd := ShellCommand(mountCommand)
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {

View File

@ -2,12 +2,14 @@ package chroot
import (
"bytes"
"context"
"fmt"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
"os"
"os/exec"
"syscall"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
// StepMountExtra mounts the attached device.
@ -18,7 +20,7 @@ type StepMountExtra struct {
mounts []string
}
func (s *StepMountExtra) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepMountExtra) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)

View File

@ -1,8 +1,10 @@
package chroot
import (
"context"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type postMountCommandsData struct {
@ -16,7 +18,7 @@ type StepPostMountCommands struct {
Commands []string
}
func (s *StepPostMountCommands) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
device := state.Get("device").(string)
mountPath := state.Get("mount_path").(string)
@ -27,14 +29,14 @@ func (s *StepPostMountCommands) Run(state multistep.StateBag) multistep.StepActi
return multistep.ActionContinue
}
ctx := config.ctx
ctx.Data = &postMountCommandsData{
ictx := config.ctx
ictx.Data = &postMountCommandsData{
Device: device,
MountPath: mountPath,
}
ui.Say("Running post-mount commands...")
if err := RunLocalCommands(s.Commands, wrappedCommand, ctx, ui); err != nil {
if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt

View File

@ -1,8 +1,10 @@
package chroot
import (
"context"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type preMountCommandsData struct {
@ -14,7 +16,7 @@ type StepPreMountCommands struct {
Commands []string
}
func (s *StepPreMountCommands) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
device := state.Get("device").(string)
ui := state.Get("ui").(packer.Ui)
@ -24,11 +26,11 @@ func (s *StepPreMountCommands) Run(state multistep.StateBag) multistep.StepActio
return multistep.ActionContinue
}
ctx := config.ctx
ctx.Data = &preMountCommandsData{Device: device}
ictx := config.ctx
ictx.Data = &preMountCommandsData{Device: device}
ui.Say("Running device setup commands...")
if err := RunLocalCommands(s.Commands, wrappedCommand, ctx, ui); err != nil {
if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt

View File

@ -1,19 +1,20 @@
package chroot
import (
"context"
"fmt"
"log"
"os"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepPrepareDevice finds an available device and sets it.
type StepPrepareDevice struct {
}
func (s *StepPrepareDevice) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepPrepareDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui)

View File

@ -1,80 +1,40 @@
package chroot
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
confighelper "github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepRegisterAMI creates the AMI.
type StepRegisterAMI struct {
RootVolumeSize int64
EnableAMIENASupport bool
EnableAMIENASupport confighelper.Trilean
EnableAMISriovNetSupport bool
}
func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepRegisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
ec2conn := state.Get("ec2").(*ec2.EC2)
snapshotId := state.Get("snapshot_id").(string)
snapshotID := state.Get("snapshot_id").(string)
ui := state.Get("ui").(packer.Ui)
ui.Say("Registering the AMI...")
var (
registerOpts *ec2.RegisterImageInput
mappings []*ec2.BlockDeviceMapping
image *ec2.Image
rootDeviceName string
)
var registerOpts *ec2.RegisterImageInput
// Source Image is only required to be passed if the image is not from scratch
if config.FromScratch {
mappings = config.AMIBlockDevices.BuildAMIDevices()
rootDeviceName = config.RootDeviceName
registerOpts = buildBaseRegisterOpts(config, nil, s.RootVolumeSize, snapshotID)
} else {
image = state.Get("source_image").(*ec2.Image)
mappings = image.BlockDeviceMappings
rootDeviceName = *image.RootDeviceName
}
newMappings := make([]*ec2.BlockDeviceMapping, len(mappings))
for i, device := range mappings {
newDevice := device
if *newDevice.DeviceName == rootDeviceName {
if newDevice.Ebs != nil {
newDevice.Ebs.SnapshotId = aws.String(snapshotId)
} else {
newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotId)}
}
if config.FromScratch || s.RootVolumeSize > *newDevice.Ebs.VolumeSize {
newDevice.Ebs.VolumeSize = aws.Int64(s.RootVolumeSize)
}
}
// We assume we're working from a snapshot, so we unset the Encrypted field if set;
// otherwise the AWS API will return InvalidParameter
if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
newDevice.Ebs.Encrypted = nil
}
newMappings[i] = newDevice
}
if config.FromScratch {
registerOpts = &ec2.RegisterImageInput{
Name: &config.AMIName,
Architecture: aws.String(ec2.ArchitectureValuesX8664),
RootDeviceName: aws.String(rootDeviceName),
VirtualizationType: aws.String(config.AMIVirtType),
BlockDeviceMappings: newMappings,
}
} else {
registerOpts = buildRegisterOpts(config, image, newMappings)
image := state.Get("source_image").(*ec2.Image)
registerOpts = buildBaseRegisterOpts(config, image, s.RootVolumeSize, snapshotID)
}
if s.EnableAMISriovNetSupport {
@ -82,7 +42,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
registerOpts.SriovNetSupport = aws.String("simple")
}
if s.EnableAMIENASupport {
if s.EnableAMIENASupport.True() {
// Set EnaSupport to true
// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
registerOpts.EnaSupport = aws.Bool(true)
@ -101,16 +61,8 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
amis[*ec2conn.Config.Region] = *registerResp.ImageId
state.Put("amis", amis)
// Wait for the image to become ready
stateChange := awscommon.StateChangeConf{
Pending: []string{"pending"},
Target: "available",
Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId),
StepState: state,
}
ui.Say("Waiting for AMI to become ready...")
if _, err := awscommon.WaitForState(&stateChange); err != nil {
if err := awscommon.WaitUntilAMIAvailable(ctx, ec2conn, *registerResp.ImageId); err != nil {
err := fmt.Errorf("Error waiting for AMI: %s", err)
state.Put("error", err)
ui.Error(err.Error())
@ -122,11 +74,65 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
func buildRegisterOpts(config *Config, image *ec2.Image, mappings []*ec2.BlockDeviceMapping) *ec2.RegisterImageInput {
// buildBaseRegisterOpts builds the base register opts with architecture, name, root block device, mappings, and virtualization type
func buildBaseRegisterOpts(config *Config, sourceImage *ec2.Image, rootVolumeSize int64, snapshotID string) *ec2.RegisterImageInput {
var (
mappings []*ec2.BlockDeviceMapping
rootDeviceName string
)
generatingNewBlockDeviceMappings := config.FromScratch || len(config.AMIMappings) > 0
if generatingNewBlockDeviceMappings {
mappings = config.AMIMappings.BuildEC2BlockDeviceMappings()
rootDeviceName = config.RootDeviceName
} else {
// If config.FromScratch is false, source image must be set
mappings = sourceImage.BlockDeviceMappings
rootDeviceName = *sourceImage.RootDeviceName
}
newMappings := make([]*ec2.BlockDeviceMapping, len(mappings))
for i, device := range mappings {
newDevice := device
if *newDevice.DeviceName == rootDeviceName {
if newDevice.Ebs != nil {
newDevice.Ebs.SnapshotId = aws.String(snapshotID)
} else {
newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotID)}
}
if generatingNewBlockDeviceMappings || rootVolumeSize > *newDevice.Ebs.VolumeSize {
newDevice.Ebs.VolumeSize = aws.Int64(rootVolumeSize)
}
}
// We assume we're working from a snapshot, so we unset the Encrypted field if set;
// otherwise the AWS API will return InvalidParameter
if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
newDevice.Ebs.Encrypted = nil
}
newMappings[i] = newDevice
}
if config.FromScratch {
return &ec2.RegisterImageInput{
Name: &config.AMIName,
Architecture: aws.String(config.Architecture),
RootDeviceName: aws.String(rootDeviceName),
VirtualizationType: aws.String(config.AMIVirtType),
BlockDeviceMappings: newMappings,
}
}
return buildRegisterOptsFromExistingImage(config, sourceImage, newMappings, rootDeviceName)
}
func buildRegisterOptsFromExistingImage(config *Config, image *ec2.Image, mappings []*ec2.BlockDeviceMapping, rootDeviceName string) *ec2.RegisterImageInput {
registerOpts := &ec2.RegisterImageInput{
Name: &config.AMIName,
Architecture: image.Architecture,
RootDeviceName: image.RootDeviceName,
RootDeviceName: &rootDeviceName,
BlockDeviceMappings: mappings,
VirtualizationType: image.VirtualizationType,
}

View File

@ -3,6 +3,9 @@ package chroot
import (
"testing"
amazon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/common"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
)
@ -21,12 +24,13 @@ func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
config.AMIName = "test_ami_name"
config.AMIDescription = "test_ami_description"
config.AMIVirtType = "paravirtual"
rootDeviceName := "foo"
image := testImage()
blockDevices := []*ec2.BlockDeviceMapping{}
opts := buildRegisterOpts(&config, &image, blockDevices)
opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName)
expected := config.AMIVirtType
if *opts.VirtualizationType != expected {
@ -43,6 +47,10 @@ func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId)
}
expected = rootDeviceName
if *opts.RootDeviceName != expected {
t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
}
}
func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
@ -50,12 +58,13 @@ func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
config.AMIName = "test_ami_name"
config.AMIDescription = "test_ami_description"
config.AMIVirtType = "hvm"
rootDeviceName := "foo"
image := testImage()
blockDevices := []*ec2.BlockDeviceMapping{}
opts := buildRegisterOpts(&config, &image, blockDevices)
opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName)
expected := config.AMIVirtType
if *opts.VirtualizationType != expected {
@ -70,4 +79,138 @@ func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
if opts.KernelId != nil {
t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId)
}
expected = rootDeviceName
if *opts.RootDeviceName != expected {
t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
}
}
func TestStepRegisterAmi_buildRegisterOptsFromScratch(t *testing.T) {
rootDeviceName := "/dev/sda"
snapshotID := "foo"
config := Config{
FromScratch: true,
PackerConfig: common.PackerConfig{},
AMIMappings: []BlockDevice{
{BlockDevice: amazon.BlockDevice{
DeviceName: rootDeviceName,
}},
},
RootDeviceName: rootDeviceName,
}
registerOpts := buildBaseRegisterOpts(&config, nil, 10, snapshotID)
if len(registerOpts.BlockDeviceMappings) != 1 {
t.Fatal("Expected block device mapping of length 1")
}
if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotID {
t.Fatalf("Snapshot ID of root disk not set to snapshot id %s", rootDeviceName)
}
}
func TestStepRegisterAmi_buildRegisterOptFromExistingImage(t *testing.T) {
rootDeviceName := "/dev/sda"
snapshotID := "foo"
config := Config{
FromScratch: false,
PackerConfig: common.PackerConfig{},
}
sourceImage := ec2.Image{
RootDeviceName: &rootDeviceName,
BlockDeviceMappings: []*ec2.BlockDeviceMapping{
{
DeviceName: &rootDeviceName,
Ebs: &ec2.EbsBlockDevice{
VolumeSize: aws.Int64(10),
},
},
// Throw in an ephemeral device; it seems like all devices in the block device mappings
// returned for a source AMI have a size, even the ephemeral ones
{
DeviceName: aws.String("/dev/sdb"),
VirtualName: aws.String("ephemeral0"),
Ebs: &ec2.EbsBlockDevice{
VolumeSize: aws.Int64(0),
},
},
},
}
registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotID)
if len(registerOpts.BlockDeviceMappings) != 2 {
t.Fatal("Expected block device mapping of length 2")
}
for _, dev := range registerOpts.BlockDeviceMappings {
if dev.Ebs.SnapshotId != nil && *dev.Ebs.SnapshotId == snapshotID {
// Even though the root volume size is in the config, it isn't used; instead we use the
// root volume size that's derived when we build the step
if *dev.Ebs.VolumeSize != 15 {
t.Fatalf("Root volume size not 15 GB instead %d", *dev.Ebs.VolumeSize)
}
return
}
}
t.Fatalf("Could not find device with snapshot ID %s", snapshotID)
}
func TestStepRegisterAmi_buildRegisterOptFromExistingImageWithBlockDeviceMappings(t *testing.T) {
const (
rootDeviceName = "/dev/xvda"
oldRootDevice = "/dev/sda1"
)
snapshotId := "foo"
config := Config{
FromScratch: false,
PackerConfig: common.PackerConfig{},
AMIMappings: []BlockDevice{
{BlockDevice: amazon.BlockDevice{
DeviceName: rootDeviceName,
}},
},
RootDeviceName: rootDeviceName,
}
// Intentionally try to use a different root device name
sourceImage := ec2.Image{
RootDeviceName: aws.String(oldRootDevice),
BlockDeviceMappings: []*ec2.BlockDeviceMapping{
{
DeviceName: aws.String(oldRootDevice),
Ebs: &ec2.EbsBlockDevice{
VolumeSize: aws.Int64(10),
},
},
// Throw in an ephemeral device; it seems like all devices in the block device mappings
// returned for a source AMI have a size, even the ephemeral ones
{
DeviceName: aws.String("/dev/sdb"),
VirtualName: aws.String("ephemeral0"),
Ebs: &ec2.EbsBlockDevice{
VolumeSize: aws.Int64(0),
},
},
},
}
registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotId)
if len(registerOpts.BlockDeviceMappings) != 1 {
t.Fatal("Expected block device mapping of length 1")
}
if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotId {
t.Fatalf("Snapshot ID of root disk set to '%s' expected '%s'", *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId, rootDeviceName)
}
if *registerOpts.RootDeviceName != rootDeviceName {
t.Fatalf("Root device set to '%s' expected %s", *registerOpts.RootDeviceName, rootDeviceName)
}
if *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize != 15 {
t.Fatalf("Size of root disk not set to 15 GB, instead %d", *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize)
}
}

View File

@ -1,14 +1,14 @@
package chroot
import (
"errors"
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go/service/ec2"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
// StepSnapshot creates a snapshot of the created volume.
@ -19,7 +19,7 @@ type StepSnapshot struct {
snapshotId string
}
func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
func (s *StepSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(*ec2.EC2)
ui := state.Get("ui").(packer.Ui)
volumeId := state.Get("volume_id").(string)
@ -43,26 +43,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId))
// Wait for the snapshot to be ready
stateChange := awscommon.StateChangeConf{
Pending: []string{"pending"},
StepState: state,
Target: "completed",
Refresh: func() (interface{}, string, error) {
resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIds: []*string{&s.snapshotId}})
if err != nil {
return nil, "", err
}
if len(resp.Snapshots) == 0 {
return nil, "", errors.New("No snapshots found.")
}
s := resp.Snapshots[0]
return s, *s.State, nil
},
}
_, err = awscommon.WaitForState(&stateChange)
err = awscommon.WaitUntilSnapshotDone(ctx, ec2conn, s.snapshotId)
if err != nil {
err := fmt.Errorf("Error waiting for snapshot: %s", err)
state.Put("error", err)

View File

@ -1,30 +1,129 @@
//go:generate struct-markdown
package common
import (
"crypto/tls"
"fmt"
"log"
"os"
"time"
"net/http"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/hashicorp/go-cleanhttp"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/packer/template/interpolate"
vaultapi "github.com/hashicorp/vault/api"
)
type VaultAWSEngineOptions struct {
Name string `mapstructure:"name"`
RoleARN string `mapstructure:"role_arn"`
// Specifies the TTL for the use of the STS token. This
// is specified as a string with a duration suffix. Valid only when
// credential_type is assumed_role or federation_token. When not
// specified, the default_sts_ttl set for the role will be used. If that
// is also not set, then the default value of 3600s will be used. AWS
// places limits on the maximum TTL allowed. See the AWS documentation on
// the DurationSeconds parameter for AssumeRole (for assumed_role
// credential types) and GetFederationToken (for federation_token
// credential types) for more details.
TTL string `mapstructure:"ttl" required:"false"`
EngineName string `mapstructure:"engine_name"`
}
func (v *VaultAWSEngineOptions) Empty() bool {
return len(v.Name) == 0 && len(v.RoleARN) == 0 &&
len(v.EngineName) == 0 && len(v.TTL) == 0
}
// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
AccessKey string `mapstructure:"access_key"`
CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"`
MFACode string `mapstructure:"mfa_code"`
ProfileName string `mapstructure:"profile"`
RawRegion string `mapstructure:"region"`
SecretKey string `mapstructure:"secret_key"`
SkipValidation bool `mapstructure:"skip_region_validation"`
Token string `mapstructure:"token"`
session *session.Session
// The access key used to communicate with AWS. [Learn how to set
// this](/docs/builders/amazon.html#specifying-amazon-credentials). On EBS, this
// is not required if you are using `use_vault_aws_engine` for
// authentication instead.
AccessKey string `mapstructure:"access_key" required:"true"`
// This option is useful if you use a cloud
// provider whose API is compatible with AWS EC2. Specify another endpoint
// like this: https://ec2.custom.endpoint.com.
CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
// Enable automatic decoding of any encoded authorization (error) messages
// using the `sts:DecodeAuthorizationMessage` API. Note: requires that the
// effective user/role have permissions to `sts:DecodeAuthorizationMessage`
// on resource `*`. Default `false`.
DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
// This allows skipping TLS
// verification of the AWS EC2 endpoint. The default is false.
InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
// The MFA
// [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
// code. This should probably be a user variable since it changes all the
// time.
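// A hypothetical snippet (variable name is a placeholder) showing `mfa_code`
// supplied through a user variable, as suggested above:
//
// ``` json
// {
//   "variables": { "mfa": "" },
//   "builders": [{ "type": "amazon-ebs", "mfa_code": "{{user `mfa`}}" }]
// }
// ```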
MFACode string `mapstructure:"mfa_code" required:"false"`
// The profile to use in the shared credentials file for
// AWS. See Amazon's documentation on [specifying
// profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
// for more details.
ProfileName string `mapstructure:"profile" required:"false"`
// The name of the region, such as `us-east-1`, in which
// to launch the EC2 instance to create the AMI.
// When chroot building, this value is guessed from the environment.
RawRegion string `mapstructure:"region" required:"true"`
// The secret key used to communicate with AWS. [Learn how to set
// this](amazon.html#specifying-amazon-credentials). This is not required
// if you are using `use_vault_aws_engine` for authentication instead.
SecretKey string `mapstructure:"secret_key" required:"true"`
// Set to true if you want to skip
// validation of the ami_regions configuration option. Default false.
SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
// The access token to use. This is different from the
// access key and secret key. If you're not sure what this is, then you
// probably don't need it. This will also be read from the AWS_SESSION_TOKEN
// environment variable.
Token string `mapstructure:"token" required:"false"`
session *session.Session
// Get credentials from Hashicorp Vault's aws secrets engine. You must
// already have created a role to use. For more information about
// generating credentials via the Vault engine, see the [Vault
// docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
// If you set this flag, you must also set the below options:
// - `name` (string) - Required. Specifies the name of the role to generate
// credentials against. This is part of the request URL.
// - `engine_name` (string) - The name of the aws secrets engine. In the
// Vault docs, this is normally referred to as "aws", and Packer will
// default to "aws" if `engine_name` is not set.
// - `role_arn` (string)- The ARN of the role to assume if credential\_type
// on the Vault role is assumed\_role. Must match one of the allowed role
// ARNs in the Vault role. Optional if the Vault role only allows a single
// AWS role ARN; required otherwise.
// - `ttl` (string) - Specifies the TTL for the use of the STS token. This
// is specified as a string with a duration suffix. Valid only when
// credential\_type is assumed\_role or federation\_token. When not
// specified, the default\_sts\_ttl set for the role will be used. If that
// is also not set, then the default value of 3600s will be used. AWS
// places limits on the maximum TTL allowed. See the AWS documentation on
// the DurationSeconds parameter for AssumeRole (for assumed\_role
// credential types) and GetFederationToken (for federation\_token
// credential types) for more details.
//
// ``` json
// {
// "vault_aws_engine": {
// "name": "myrole",
// "role_arn": "myarn",
// "ttl": "3600s"
// }
// }
// ```
VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
getEC2Connection func() ec2iface.EC2API
}
// Config returns a valid aws.Config object for access to AWS services, or
@ -34,86 +133,161 @@ func (c *AccessConfig) Session() (*session.Session, error) {
return c.session, nil
}
config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true)
config := aws.NewConfig().WithCredentialsChainVerboseErrors(true)
if c.ProfileName != "" {
if err := os.Setenv("AWS_PROFILE", c.ProfileName); err != nil {
return nil, fmt.Errorf("Set env error: %s", err)
}
staticCreds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
if _, err := staticCreds.Get(); err != credentials.ErrStaticCredentialsEmpty {
config.WithCredentials(staticCreds)
}
if c.RawRegion != "" {
config = config.WithRegion(c.RawRegion)
} else if region := c.metadataRegion(); region != "" {
config = config.WithRegion(region)
}
if c.CustomEndpointEc2 != "" {
config = config.WithEndpoint(c.CustomEndpointEc2)
}
if c.AccessKey != "" {
config = config.WithCredentials(
credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token))
config = config.WithHTTPClient(cleanhttp.DefaultClient())
transport := config.HTTPClient.Transport.(*http.Transport)
if c.InsecureSkipTLSVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
transport.Proxy = http.ProxyFromEnvironment
opts := session.Options{
SharedConfigState: session.SharedConfigEnable,
Config: *config,
}
if c.ProfileName != "" {
opts.Profile = c.ProfileName
}
if c.MFACode != "" {
opts.AssumeRoleTokenProvider = func() (string, error) {
return c.MFACode, nil
}
}
if sess, err := session.NewSessionWithOptions(opts); err != nil {
sess, err := session.NewSessionWithOptions(opts)
if err != nil {
return nil, err
} else if *sess.Config.Region == "" {
return nil, fmt.Errorf("Could not find AWS region, make sure it's set.")
} else {
log.Printf("Found region %s", *sess.Config.Region)
c.session = sess
}
log.Printf("Found region %s", *sess.Config.Region)
c.session = sess
cp, err := c.session.Config.Credentials.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, fmt.Errorf("No valid credential sources found for AWS Builder. " +
"Please see https://www.packer.io/docs/builders/amazon.html#specifying-amazon-credentials " +
"for more information on providing credentials for the AWS Builder.")
} else {
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
}
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
if c.DecodeAuthZMessages {
DecodeAuthZMessages(c.session)
}
LogEnvOverrideWarnings()
return c.session, nil
}
// metadataRegion returns the region from the metadata service
func (c *AccessConfig) metadataRegion() string {
client := cleanhttp.DefaultClient()
// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments
client.Timeout = 100 * time.Millisecond
ec2meta := ec2metadata.New(session.New(), &aws.Config{
HTTPClient: client,
})
region, err := ec2meta.Region()
if err != nil {
log.Println("Error getting region from metadata service, "+
"probably because we're not running on AWS.", err)
return ""
func (c *AccessConfig) SessionRegion() string {
if c.session == nil {
panic("access config session should be set.")
}
return region
return aws.StringValue(c.session.Config.Region)
}
func (c *AccessConfig) IsGovCloud() bool {
return strings.HasPrefix(c.SessionRegion(), "us-gov-")
}
func (c *AccessConfig) IsChinaCloud() bool {
return strings.HasPrefix(c.SessionRegion(), "cn-")
}
func (c *AccessConfig) GetCredsFromVault() error {
// const EnvVaultAddress = "VAULT_ADDR"
// const EnvVaultToken = "VAULT_TOKEN"
vaultConfig := vaultapi.DefaultConfig()
cli, err := vaultapi.NewClient(vaultConfig)
if err != nil {
return fmt.Errorf("Error getting Vault client: %s", err)
}
if c.VaultAWSEngine.EngineName == "" {
c.VaultAWSEngine.EngineName = "aws"
}
path := fmt.Sprintf("/%s/creds/%s", c.VaultAWSEngine.EngineName,
c.VaultAWSEngine.Name)
secret, err := cli.Logical().Read(path)
if err != nil {
return fmt.Errorf("Error reading vault secret: %s", err)
}
if secret == nil {
return fmt.Errorf("Vault Secret does not exist at the given path.")
}
c.AccessKey = secret.Data["access_key"].(string)
c.SecretKey = secret.Data["secret_key"].(string)
token := secret.Data["security_token"]
if token != nil {
c.Token = token.(string)
} else {
c.Token = ""
}
return nil
}
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
var errs []error
// Either both access and secret key must be set or neither of them should
// be.
if c.SkipMetadataApiCheck {
log.Println("(WARN) skip_metadata_api_check ignored.")
}
// Make sure it's obvious from the config how we're getting credentials:
// Vault, Packer config, or environment.
if !c.VaultAWSEngine.Empty() {
if len(c.AccessKey) > 0 {
errs = append(errs,
fmt.Errorf("If you have set vault_aws_engine, you must not set"+
" the access_key or secret_key."))
}
// Go ahead and grab those credentials from Vault now, so we can set
// the keys and token now.
err := c.GetCredsFromVault()
if err != nil {
errs = append(errs, err)
}
}
if (len(c.AccessKey) > 0) != (len(c.SecretKey) > 0) {
errs = append(errs,
fmt.Errorf("`access_key` and `secret_key` must both be either set or not set."))
}
if c.RawRegion != "" && !c.SkipValidation {
if valid := ValidateRegion(c.RawRegion); !valid {
errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion))
}
}
return errs
}
func (c *AccessConfig) NewEC2Connection() (ec2iface.EC2API, error) {
if c.getEC2Connection != nil {
return c.getEC2Connection(), nil
}
sess, err := c.Session()
if err != nil {
return nil, err
}
ec2conn := ec2.New(sess)
return ec2conn, nil
}

View File

@ -2,39 +2,69 @@ package common
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)
func testAccessConfig() *AccessConfig {
return &AccessConfig{}
return &AccessConfig{
getEC2Connection: func() ec2iface.EC2API {
return &mockEC2Client{}
},
}
}
func TestAccessConfigPrepare_Region(t *testing.T) {
c := testAccessConfig()
c.RawRegion = ""
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.RawRegion = "us-east-12"
if err := c.Prepare(nil); err == nil {
t.Fatal("should have error")
err := c.ValidateRegion(c.RawRegion)
if err == nil {
t.Fatalf("should have region validation err: %s", c.RawRegion)
}
c.RawRegion = "us-east-1"
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
err = c.ValidateRegion(c.RawRegion)
if err != nil {
t.Fatalf("shouldn't have region validation err: %s", c.RawRegion)
}
c.RawRegion = "custom"
if err := c.Prepare(nil); err == nil {
t.Fatalf("should have err")
err = c.ValidateRegion(c.RawRegion)
if err == nil {
t.Fatalf("should have region validation err: %s", c.RawRegion)
}
c.RawRegion = "custom"
c.SkipValidation = true
// testing whole prepare func here; this is checking that validation is
// skipped, so we don't need a mock connection
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.SkipValidation = false
c.SkipValidation = false
c.RawRegion = ""
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
}
func TestAccessConfigPrepare_RegionRestricted(t *testing.T) {
c := testAccessConfig()
// Create a Session with a custom region
c.session = session.Must(session.NewSession(&aws.Config{
Region: aws.String("us-gov-west-1"),
}))
if err := c.Prepare(nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
if !c.IsGovCloud() {
t.Fatal("We should be in gov region.")
}
}

View File

@ -1,33 +1,132 @@
//go:generate struct-markdown
package common
import (
"fmt"
"log"
"regexp"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/template/interpolate"
)
// AMIConfig is for common configuration related to creating AMIs.
type AMIConfig struct {
AMIName string `mapstructure:"ami_name"`
AMIDescription string `mapstructure:"ami_description"`
AMIVirtType string `mapstructure:"ami_virtualization_type"`
AMIUsers []string `mapstructure:"ami_users"`
AMIGroups []string `mapstructure:"ami_groups"`
AMIProductCodes []string `mapstructure:"ami_product_codes"`
AMIRegions []string `mapstructure:"ami_regions"`
AMISkipRegionValidation bool `mapstructure:"skip_region_validation"`
AMITags map[string]string `mapstructure:"tags"`
AMIENASupport bool `mapstructure:"ena_support"`
AMISriovNetSupport bool `mapstructure:"sriov_support"`
AMIForceDeregister bool `mapstructure:"force_deregister"`
AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot"`
AMIEncryptBootVolume bool `mapstructure:"encrypt_boot"`
AMIKmsKeyId string `mapstructure:"kms_key_id"`
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids"`
SnapshotTags map[string]string `mapstructure:"snapshot_tags"`
SnapshotUsers []string `mapstructure:"snapshot_users"`
SnapshotGroups []string `mapstructure:"snapshot_groups"`
// The name of the resulting AMI that will appear when managing AMIs in the
// AWS console or via APIs. This must be unique. To help make this unique,
// use a function like timestamp (see [template
// engine](../templates/engine.html) for more info).
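// For example (the name is a placeholder), a minimal sketch that uses the
// timestamp function to keep the name unique:
//
// ``` json
// "ami_name": "packer-example {{timestamp}}"
// ```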
AMIName string `mapstructure:"ami_name" required:"true"`
// The description to set for the resulting
// AMI(s). By default this description is empty. This is a template
// engine, see Build template
// data for more information.
AMIDescription string `mapstructure:"ami_description" required:"false"`
// The type of virtualization for the AMI you are building. This option is
// required to register HVM images. Can be paravirtual (default) or hvm.
AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
// A list of account IDs that have access to
// launch the resulting AMI(s). By default no additional users other than the
// user creating the AMI has permissions to launch it.
AMIUsers []string `mapstructure:"ami_users" required:"false"`
// A list of groups that have access to
// launch the resulting AMI(s). By default no groups have permission to launch
// the AMI. `all` will make the AMI publicly accessible.
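// For instance (a sketch), making the AMI public:
//
// ``` json
// "ami_groups": ["all"]
// ```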
AMIGroups []string `mapstructure:"ami_groups" required:"false"`
// A list of product codes to
// associate with the AMI. By default no product codes are associated with the
// AMI.
AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
// A list of regions to copy the AMI to.
// Tags and attributes are copied along with the AMI. AMI copying takes time
// depending on the size of the AMI, but will generally take many minutes.
AMIRegions []string `mapstructure:"ami_regions" required:"false"`
// Set to true if you want to skip
// validation of the ami_regions configuration option. Default false.
AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
// Tags applied to the AMI. This is a
// [template engine](/docs/templates/engine.html), see [Build template
// data](#build-template-data) for more information.
AMITags TagMap `mapstructure:"tags" required:"false"`
// Enable enhanced networking (ENA but not SriovNetSupport) on
// HVM-compatible AMIs. If set, add `ec2:ModifyInstanceAttribute` to your
// AWS IAM policy.
//
// Note: you must make sure enhanced networking is enabled on your
// instance. See [Amazon's documentation on enabling enhanced
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
AMIENASupport config.Trilean `mapstructure:"ena_support" required:"false"`
// Enable enhanced networking (SriovNetSupport but not ENA) on
// HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
// AWS IAM policy. Note: you must make sure enhanced networking is enabled
// on your instance. See [Amazon's documentation on enabling enhanced
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
// Default `false`.
AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
// Force Packer to first deregister an existing
// AMI if one with the same name already exists. Default false.
AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
// Force Packer to delete snapshots
// associated with AMIs, which have been deregistered by force_deregister.
// Default false.
AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
// Whether or not to encrypt the resulting AMI when
// copying a provisioned instance to an AMI. By default, Packer will keep the
// encryption setting to what it was in the source image. Setting false will
// result in an unencrypted image, and true will result in an encrypted one.
// If you have used the `launch_block_device_mappings` to set an encryption
// key and that key is the same as the one you want the image encrypted with
// at the end, then you don't need to set this field; leaving it empty will
// prevent an unnecessary extra copy step and save you some time.
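// A minimal sketch (the key ID is a placeholder) of encrypting the resulting
// AMI with a specific KMS key:
//
// ``` json
// "encrypt_boot": true,
// "kms_key_id": "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
// ```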
AMIEncryptBootVolume config.Trilean `mapstructure:"encrypt_boot" required:"false"`
// ID, alias or ARN of the KMS key to use for boot volume encryption. This
// only applies to the main `region`, other regions where the AMI will be
// copied will be encrypted by the default EBS KMS key. For valid formats
// see *KmsKeyId* in the [AWS API docs -
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
// This field is validated by Packer, when using an alias, you will have to
// prefix `kms_key_id` with `alias/`.
AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
// regions to copy the ami to, along with the custom kms key id (alias or
// arn) to use for encryption for that region. Keys must match the regions
// provided in `ami_regions`. If you just want to encrypt using a default
// ID, you can stick with `kms_key_id` and `ami_regions`. If you want a
// region to be encrypted with that region's default key ID, you can use an
// empty string `""` instead of a key id in this map. (e.g. `"us-east-1":
// ""`) However, you cannot use default key IDs if you are using this in
// conjunction with `snapshot_users` -- in that situation you must use
// custom keys. For valid formats see *KmsKeyId* in the [AWS API docs -
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
//
// This option supersedes the `kms_key_id` option -- if you set both, and
// they are different, Packer will respect the value in
// `region_kms_key_ids` for your build region and silently disregard the
// value provided in `kms_key_id`.
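// A hypothetical example (regions and key values are placeholders) pairing
// `ami_regions` with `region_kms_key_ids`; the empty string asks for that
// region's default key:
//
// ``` json
// "ami_regions": ["us-east-1", "us-west-1"],
// "region_kms_key_ids": {
//   "us-east-1": "alias/my-packer-key",
//   "us-west-1": ""
// }
// ```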
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
// If true, Packer will not check whether an AMI with the `ami_name` exists
// in the region it is building in. It will use an intermediary AMI name,
// which it will not convert to an AMI in the build region. It will copy
// the intermediary AMI into any regions provided in `ami_regions`, then
// delete the intermediary AMI. Default `false`.
AMISkipBuildRegion bool `mapstructure:"skip_save_build_region"`
// Tags to apply to snapshot.
// They will override AMI tags if already applied to snapshot. This is a
// [template engine](../templates/engine.html), see [Build template
// data](#build-template-data) for more information.
SnapshotTags TagMap `mapstructure:"snapshot_tags" required:"false"`
// A list of account IDs that have
// access to create volumes from the snapshot(s). By default no additional
// users other than the user creating the AMI has permissions to create
// volumes from the backing snapshot(s).
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
// A list of groups that have access to
// create volumes from the snapshot(s). By default no groups have permission
// to create volumes from the snapshot(s). `all` will make the snapshot
// publicly accessible.
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
}
func stringInSlice(s []string, searchstr string) bool {
@ -56,51 +155,41 @@ func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context
}
}
if len(c.AMIRegions) > 0 {
regionSet := make(map[string]struct{})
regions := make([]string, 0, len(c.AMIRegions))
errs = append(errs, c.prepareRegions(accessConfig)...)
for _, region := range c.AMIRegions {
// If we already saw the region, then don't look again
if _, ok := regionSet[region]; ok {
continue
}
// Mark that we saw the region
regionSet[region] = struct{}{}
if !c.AMISkipRegionValidation {
// Verify the region is real
if valid := ValidateRegion(region); !valid {
errs = append(errs, fmt.Errorf("Unknown region: %s", region))
}
}
// Make sure that if we have region_kms_key_ids defined,
// the regions in ami_regions are also in region_kms_key_ids
if len(c.AMIRegionKMSKeyIDs) > 0 {
if _, ok := c.AMIRegionKMSKeyIDs[region]; !ok {
errs = append(errs, fmt.Errorf("Region %s is in ami_regions but not in region_kms_key_ids", region))
}
}
if (accessConfig != nil) && (region == accessConfig.RawRegion) {
// make sure we don't try to copy to the region we originally
// create the AMI in.
log.Printf("Cannot copy AMI to AWS session region '%s', deleting it from `ami_regions`.", region)
continue
}
regions = append(regions, region)
// Prevent sharing of default KMS key encrypted volumes with other aws users
if len(c.AMIUsers) > 0 {
if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume.True() {
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key"))
}
if len(c.AMIRegionKMSKeyIDs) > 0 {
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
if len(kmsKey) == 0 {
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key for other regions"))
}
}
}
c.AMIRegions = regions
}
if len(c.AMIUsers) > 0 && c.AMIEncryptBootVolume {
errs = append(errs, fmt.Errorf("Cannot share AMI with encrypted boot volume"))
var kmsKeys []string
if len(c.AMIKmsKeyId) > 0 {
kmsKeys = append(kmsKeys, c.AMIKmsKeyId)
}
if len(c.AMIRegionKMSKeyIDs) > 0 {
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
if len(kmsKey) > 0 {
kmsKeys = append(kmsKeys, kmsKey)
}
}
}
for _, kmsKey := range kmsKeys {
if !validateKmsKey(kmsKey) {
errs = append(errs, fmt.Errorf("%s is not a valid KMS Key Id.", kmsKey))
}
}
if len(c.SnapshotUsers) > 0 {
if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume {
if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume.True() {
errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted with default KMS key"))
}
if len(c.AMIRegionKMSKeyIDs) > 0 {
@ -120,7 +209,7 @@ func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context
errs = append(errs, fmt.Errorf("AMIName should only contain "+
"alphanumeric characters, parentheses (()), square brackets ([]), spaces "+
"( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs "+
"(@), or underscores(_). You can use the `clean_ami_name` template "+
"(@), or underscores(_). You can use the `clean_resource_name` template "+
"filter to automatically clean your ami name."))
}
@ -130,3 +219,58 @@ func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context
return nil
}
func (c *AMIConfig) prepareRegions(accessConfig *AccessConfig) (errs []error) {
if len(c.AMIRegions) > 0 {
regionSet := make(map[string]struct{})
regions := make([]string, 0, len(c.AMIRegions))
for _, region := range c.AMIRegions {
// If we already saw the region, then don't look again
if _, ok := regionSet[region]; ok {
continue
}
// Mark that we saw the region
regionSet[region] = struct{}{}
// Make sure that if we have region_kms_key_ids defined,
// the regions in ami_regions are also in region_kms_key_ids
if len(c.AMIRegionKMSKeyIDs) > 0 {
if _, ok := c.AMIRegionKMSKeyIDs[region]; !ok {
errs = append(errs, fmt.Errorf("Region %s is in ami_regions but not in region_kms_key_ids", region))
}
}
if (accessConfig != nil) && (region == accessConfig.RawRegion) {
// make sure we don't try to copy to the region we originally
// create the AMI in.
log.Printf("Cannot copy AMI to AWS session region '%s', deleting it from `ami_regions`.", region)
continue
}
regions = append(regions, region)
}
c.AMIRegions = regions
}
return errs
}
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html
func validateKmsKey(kmsKey string) (valid bool) {
kmsKeyIdPattern := `[a-f0-9-]+$`
aliasPattern := `alias/[a-zA-Z0-9:/_-]+$`
kmsArnStartPattern := `^arn:aws(-us-gov)?:kms:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12}):`
if regexp.MustCompile(fmt.Sprintf("^%s", kmsKeyIdPattern)).MatchString(kmsKey) {
return true
}
if regexp.MustCompile(fmt.Sprintf("^%s", aliasPattern)).MatchString(kmsKey) {
return true
}
if regexp.MustCompile(fmt.Sprintf("%skey/%s", kmsArnStartPattern, kmsKeyIdPattern)).MatchString(kmsKey) {
return true
}
if regexp.MustCompile(fmt.Sprintf("%s%s", kmsArnStartPattern, aliasPattern)).MatchString(kmsKey) {
return true
}
return false
}

View File

@ -1,8 +1,14 @@
package common
import (
"fmt"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/hashicorp/packer/helper/config"
)
func testAMIConfig() *AMIConfig {
@ -12,43 +18,62 @@ func testAMIConfig() *AMIConfig {
}
func getFakeAccessConfig(region string) *AccessConfig {
return &AccessConfig{
RawRegion: region,
}
c := testAccessConfig()
c.RawRegion = region
return c
}
func TestAMIConfigPrepare_name(t *testing.T) {
c := testAMIConfig()
if err := c.Prepare(nil, nil); err != nil {
accessConf := testAccessConfig()
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}
c.AMIName = ""
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("should have error")
}
}
type mockEC2Client struct {
ec2iface.EC2API
}
func (m *mockEC2Client) DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) {
return &ec2.DescribeRegionsOutput{
Regions: []*ec2.Region{
{RegionName: aws.String("us-east-1")},
{RegionName: aws.String("us-east-2")},
{RegionName: aws.String("us-west-1")},
},
}, nil
}
func TestAMIConfigPrepare_regions(t *testing.T) {
c := testAMIConfig()
c.AMIRegions = nil
if err := c.Prepare(nil, nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
var errs []error
var err error
accessConf := testAccessConfig()
mockConn := &mockEC2Client{}
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatalf("shouldn't have err: %#v", errs)
}
c.AMIRegions = listEC2Regions()
if err := c.Prepare(nil, nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
c.AMIRegions, err = listEC2Regions(mockConn)
if err != nil {
t.Fatalf("shouldn't have err: %s", err.Error())
}
c.AMIRegions = []string{"foo"}
if err := c.Prepare(nil, nil); err == nil {
t.Fatal("should have error")
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatalf("shouldn't have err: %#v", errs)
}
errs = errs[:0]
c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-1"}
if err := c.Prepare(nil, nil); err != nil {
t.Fatalf("bad: %s", err)
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatalf("bad: %s", errs[0])
}
expected := []string{"us-east-1", "us-west-1"}
@ -57,11 +82,9 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
}
c.AMIRegions = []string{"custom"}
c.AMISkipRegionValidation = true
if err := c.Prepare(nil, nil); err != nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("shouldn't have error")
}
c.AMISkipRegionValidation = false
c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
c.AMIRegionKMSKeyIDs = map[string]string{
@ -69,8 +92,8 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
"us-west-1": "789-012-3456",
"us-east-2": "456-789-0123",
}
if err := c.Prepare(nil, nil); err != nil {
t.Fatal("shouldn't have error")
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal(fmt.Sprintf("shouldn't have error: %s", errs[0]))
}
c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
@ -79,7 +102,7 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
"us-west-1": "789-012-3456",
"us-east-2": "",
}
if err := c.Prepare(nil, nil); err != nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("should have passed; we are able to use default KMS key if not sharing")
}
@ -90,7 +113,7 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
"us-west-1": "789-012-3456",
"us-east-2": "",
}
if err := c.Prepare(nil, nil); err == nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("should have an error b/c can't use default KMS key if sharing")
}
@ -100,7 +123,7 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
"us-west-1": "789-012-3456",
"us-east-2": "456-789-0123",
}
if err := c.Prepare(nil, nil); err == nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("should have error b/c theres a region in the key map that isn't in ami_regions")
}
@ -109,27 +132,28 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
"us-east-1": "123-456-7890",
"us-west-1": "789-012-3456",
}
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
}
c.SnapshotUsers = []string{"foo", "bar"}
c.AMIKmsKeyId = "123-abc-456"
c.AMIEncryptBootVolume = true
c.AMIEncryptBootVolume = config.TriTrue
c.AMIRegions = []string{"us-east-1", "us-west-1"}
c.AMIRegionKMSKeyIDs = map[string]string{
"us-east-1": "123-456-7890",
"us-west-1": "",
}
if err := c.Prepare(nil, nil); err == nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
}
// allow rawregion to exist in ami_regions list.
accessConf := getFakeAccessConfig("us-east-1")
accessConf = getFakeAccessConfig("us-east-1")
c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
c.AMIRegionKMSKeyIDs = nil
if err := c.Prepare(accessConf, nil); err != nil {
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatal("should allow user to have the raw region in ami_regions")
}
@ -138,24 +162,64 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
c := testAMIConfig()
c.AMIUsers = []string{"testAccountID"}
c.AMIEncryptBootVolume = true
c.AMIEncryptBootVolume = config.TriTrue
accessConf := testAccessConfig()
c.AMIKmsKeyId = ""
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("shouldn't be able to share ami with encrypted boot volume")
}
c.AMIKmsKeyId = "89c3fb9a-de87-4f2a-aedc-fddc5138193c"
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatal("should be able to share ami with encrypted boot volume")
}
}
func TestAMIConfigPrepare_ValidateKmsKey(t *testing.T) {
c := testAMIConfig()
c.AMIEncryptBootVolume = config.TriTrue
accessConf := testAccessConfig()
validCases := []string{
"abcd1234-e567-890f-a12b-a123b4cd56ef",
"alias/foo/bar",
"arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
"arn:aws:kms:us-east-1:012345678910:alias/foo/bar",
"arn:aws-us-gov:kms:us-gov-east-1:123456789012:key/12345678-1234-abcd-0000-123456789012",
}
for _, validCase := range validCases {
c.AMIKmsKeyId = validCase
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatalf("%s should not have failed KMS key validation", validCase)
}
}
c.AMIKmsKeyId = "89c3fb9a-de87-4f2a-aedc-fddc5138193c"
if err := c.Prepare(nil, nil); err == nil {
t.Fatal("shouldn't be able to share ami with encrypted boot volume")
invalidCases := []string{
"ABCD1234-e567-890f-a12b-a123b4cd56ef",
"ghij1234-e567-890f-a12b-a123b4cd56ef",
"ghij1234+e567_890f-a12b-a123b4cd56ef",
"foo/bar",
"arn:aws:kms:us-east-1:012345678910:foo/bar",
"arn:foo:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
}
for _, invalidCase := range invalidCases {
c.AMIKmsKeyId = invalidCase
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatalf("%s should have failed KMS key validation", invalidCase)
}
}
}
func TestAMINameValidation(t *testing.T) {
c := testAMIConfig()
accessConf := testAccessConfig()
c.AMIName = "aa"
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("shouldn't be able to have an ami name with less than 3 characters")
}
@ -164,22 +228,22 @@ func TestAMINameValidation(t *testing.T) {
longAmiName += "a"
}
c.AMIName = longAmiName
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("shouldn't be able to have an ami name with great than 128 characters")
}
c.AMIName = "+aaa"
if err := c.Prepare(nil, nil); err == nil {
if err := c.Prepare(accessConf, nil); err == nil {
t.Fatal("shouldn't be able to have an ami name with invalid characters")
}
c.AMIName = "fooBAR1()[] ./-'@_"
if err := c.Prepare(nil, nil); err != nil {
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatal("should be able to use all of the allowed AMI characters")
}
c.AMIName = `xyz-base-2017-04-05-1934`
if err := c.Prepare(nil, nil); err != nil {
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatalf("expected `xyz-base-2017-04-05-1934` to pass validation.")
}

View File

@ -1,94 +1,163 @@
//go:generate struct-markdown
package common
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/template/interpolate"
)
// BlockDevice
// These will be attached when booting a new instance from your AMI. Your
// options here may vary depending on the type of VM you use.
//
// Example use case:
//
// The following mapping will tell Packer to encrypt the root volume of the
// build instance at launch using a specific non-default kms key:
//
// ``` json
// "launch_block_device_mappings": [{
// "device_name": "/dev/sda1",
// "encrypted": true,
// "kms_key_id": "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
// }]
// ```
//
// Documentation for Block Devices Mappings can be found here:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
//
// These mappings give you control over either the volumes generated for the
// Packer build via `launch_block_device_mappings`, or the volumes that Packer will
// save with the artifact AMI via `ami_block_device_mappings`.
type BlockDevice struct {
DeleteOnTermination bool `mapstructure:"delete_on_termination"`
DeviceName string `mapstructure:"device_name"`
Encrypted bool `mapstructure:"encrypted"`
IOPS int64 `mapstructure:"iops"`
NoDevice bool `mapstructure:"no_device"`
SnapshotId string `mapstructure:"snapshot_id"`
VirtualName string `mapstructure:"virtual_name"`
VolumeType string `mapstructure:"volume_type"`
VolumeSize int64 `mapstructure:"volume_size"`
// Indicates whether the EBS volume is deleted on instance termination.
// Default false. NOTE: If this value is not explicitly set to true and
// volumes are not cleaned up by an alternative method, additional volumes
// will accumulate after every build.
DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
// The device name exposed to the instance (for example, /dev/sdh or xvdh).
// Required for every device in the block device mapping.
DeviceName string `mapstructure:"device_name" required:"false"`
// Indicates whether or not to encrypt the volume. By default, Packer will
// keep the encryption setting to what it was in the source image. Setting
// false will result in an unencrypted device, and true will result in an
// encrypted one.
Encrypted config.Trilean `mapstructure:"encrypted" required:"false"`
// The number of I/O operations per second (IOPS) that the volume supports.
// See the documentation on
// [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
// for more information
IOPS int64 `mapstructure:"iops" required:"false"`
// Suppresses the specified device included in the block device mapping of
// the AMI.
NoDevice bool `mapstructure:"no_device" required:"false"`
// The ID of the snapshot.
SnapshotId string `mapstructure:"snapshot_id" required:"false"`
// The virtual device name. See the documentation on Block Device Mapping
// for more information.
VirtualName string `mapstructure:"virtual_name" required:"false"`
// The volume type. gp2 for General Purpose (SSD) volumes, io1 for
// Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized HDD, sc1
// for Cold HDD, and standard for Magnetic volumes.
VolumeType string `mapstructure:"volume_type" required:"false"`
// The size of the volume, in GiB. Required if not specifying a
// snapshot_id.
VolumeSize int64 `mapstructure:"volume_size" required:"false"`
// ID, alias or ARN of the KMS key to use for boot volume encryption. This
// only applies to the main region, other regions where the AMI will be
// copied will be encrypted by the default EBS KMS key. For valid formats
// see KmsKeyId in the [AWS API docs -
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html)
// This field is validated by Packer. When using an alias, you will have to
// prefix kms_key_id with alias/.
KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
}
type BlockDevices struct {
AMIBlockDevices `mapstructure:",squash"`
LaunchBlockDevices `mapstructure:",squash"`
}
type BlockDevices []BlockDevice
type AMIBlockDevices struct {
AMIMappings []BlockDevice `mapstructure:"ami_block_device_mappings"`
}
type LaunchBlockDevices struct {
LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings"`
}
func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping {
func (bds BlockDevices) BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping {
var blockDevices []*ec2.BlockDeviceMapping
for _, blockDevice := range b {
mapping := &ec2.BlockDeviceMapping{
DeviceName: aws.String(blockDevice.DeviceName),
}
if blockDevice.NoDevice {
mapping.NoDevice = aws.String("")
} else if blockDevice.VirtualName != "" {
if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") {
mapping.VirtualName = aws.String(blockDevice.VirtualName)
}
} else {
ebsBlockDevice := &ec2.EbsBlockDevice{
DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination),
}
if blockDevice.VolumeType != "" {
ebsBlockDevice.VolumeType = aws.String(blockDevice.VolumeType)
}
if blockDevice.VolumeSize > 0 {
ebsBlockDevice.VolumeSize = aws.Int64(blockDevice.VolumeSize)
}
// IOPS is only valid for io1 type
if blockDevice.VolumeType == "io1" {
ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS)
}
// You cannot specify Encrypted if you specify a Snapshot ID
if blockDevice.SnapshotId != "" {
ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId)
} else if blockDevice.Encrypted {
ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted)
}
mapping.Ebs = ebsBlockDevice
}
blockDevices = append(blockDevices, mapping)
for _, blockDevice := range bds {
blockDevices = append(blockDevices, blockDevice.BuildEC2BlockDeviceMapping())
}
return blockDevices
}
func (b *BlockDevices) Prepare(ctx *interpolate.Context) []error {
return nil
func (blockDevice BlockDevice) BuildEC2BlockDeviceMapping() *ec2.BlockDeviceMapping {
mapping := &ec2.BlockDeviceMapping{
DeviceName: aws.String(blockDevice.DeviceName),
}
if blockDevice.NoDevice {
mapping.NoDevice = aws.String("")
return mapping
} else if blockDevice.VirtualName != "" {
if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") {
mapping.VirtualName = aws.String(blockDevice.VirtualName)
}
return mapping
}
ebsBlockDevice := &ec2.EbsBlockDevice{
DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination),
}
if blockDevice.VolumeType != "" {
ebsBlockDevice.VolumeType = aws.String(blockDevice.VolumeType)
}
if blockDevice.VolumeSize > 0 {
ebsBlockDevice.VolumeSize = aws.Int64(blockDevice.VolumeSize)
}
// IOPS is only valid for io1 type
if blockDevice.VolumeType == "io1" {
ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS)
}
// You cannot specify Encrypted if you specify a Snapshot ID
if blockDevice.SnapshotId != "" {
ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId)
}
ebsBlockDevice.Encrypted = blockDevice.Encrypted.ToBoolPointer()
if blockDevice.KmsKeyId != "" {
ebsBlockDevice.KmsKeyId = aws.String(blockDevice.KmsKeyId)
}
mapping.Ebs = ebsBlockDevice
return mapping
}
func (b *AMIBlockDevices) BuildAMIDevices() []*ec2.BlockDeviceMapping {
return buildBlockDevices(b.AMIMappings)
func (b *BlockDevice) Prepare(ctx *interpolate.Context) error {
if b.DeviceName == "" {
return fmt.Errorf("The `device_name` must be specified " +
"for every device in the block device mapping.")
}
// encrypted must be true (or unset) when a kms_key_id is supplied
if b.KmsKeyId != "" && b.Encrypted.False() {
return fmt.Errorf("The device %v, must also have `encrypted: "+
"true` when setting a kms_key_id.", b.DeviceName)
}
_, err := interpolate.RenderInterface(&b, ctx)
return err
}
func (b *LaunchBlockDevices) BuildLaunchDevices() []*ec2.BlockDeviceMapping {
return buildBlockDevices(b.LaunchMappings)
func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
for _, block := range bds {
if err := block.Prepare(ctx); err != nil {
errs = append(errs, err)
}
}
return errs
}
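// Illustrative usage sketch (not part of this changeset): a builder config
// that declares launch mappings would validate and convert them roughly like
// this; the function name below is an assumption for the example.
func prepareAndBuildLaunchMappings(devices BlockDevices, ctx *interpolate.Context) ([]*ec2.BlockDeviceMapping, []error) {
	// Prepare enforces that every mapping names a device_name and that
	// kms_key_id is only combined with encrypted=true (or unset).
	if errs := devices.Prepare(ctx); len(errs) > 0 {
		return nil, errs
	}
	return devices.BuildEC2BlockDeviceMappings(), nil
}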

View File

@ -1,11 +1,12 @@
package common
import (
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/packer/helper/config"
)
func TestBlockDevice(t *testing.T) {
@ -71,7 +72,7 @@ func TestBlockDevice(t *testing.T) {
VolumeType: "gp2",
VolumeSize: 8,
DeleteOnTermination: true,
Encrypted: true,
Encrypted: config.TriTrue,
},
Result: &ec2.BlockDeviceMapping{
@ -84,6 +85,27 @@ func TestBlockDevice(t *testing.T) {
},
},
},
{
Config: &BlockDevice{
DeviceName: "/dev/sdb",
VolumeType: "gp2",
VolumeSize: 8,
DeleteOnTermination: true,
Encrypted: config.TriTrue,
KmsKeyId: "2Fa48a521f-3aff-4b34-a159-376ac5d37812",
},
Result: &ec2.BlockDeviceMapping{
DeviceName: aws.String("/dev/sdb"),
Ebs: &ec2.EbsBlockDevice{
VolumeType: aws.String("gp2"),
VolumeSize: aws.Int64(8),
DeleteOnTermination: aws.Bool(true),
Encrypted: aws.Bool(true),
KmsKeyId: aws.String("2Fa48a521f-3aff-4b34-a159-376ac5d37812"),
},
},
},
{
Config: &BlockDevice{
DeviceName: "/dev/sdb",
@ -124,26 +146,20 @@ func TestBlockDevice(t *testing.T) {
}
for _, tc := range cases {
amiBlockDevices := AMIBlockDevices{
AMIMappings: []BlockDevice{*tc.Config},
}
var amiBlockDevices BlockDevices = []BlockDevice{*tc.Config}
launchBlockDevices := LaunchBlockDevices{
LaunchMappings: []BlockDevice{*tc.Config},
}
var launchBlockDevices BlockDevices = []BlockDevice{*tc.Config}
expected := []*ec2.BlockDeviceMapping{tc.Result}
amiResults := amiBlockDevices.BuildAMIDevices()
if !reflect.DeepEqual(expected, amiResults) {
t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v",
expected, amiResults)
amiResults := amiBlockDevices.BuildEC2BlockDeviceMappings()
if diff := cmp.Diff(expected, amiResults); diff != "" {
t.Fatalf("Bad block device: %s", diff)
}
launchResults := launchBlockDevices.BuildLaunchDevices()
if !reflect.DeepEqual(expected, launchResults) {
t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v",
expected, launchResults)
launchResults := launchBlockDevices.BuildEC2BlockDeviceMappings()
if diff := cmp.Diff(expected, launchResults); diff != "" {
t.Fatalf("Bad block device: %s", diff)
}
}
}

View File

@ -0,0 +1,17 @@
package common
import (
"github.com/aws/aws-sdk-go/service/ec2"
)
// Build a slice of EC2 (AMI/Subnet/VPC) filter options from the filters provided.
func buildEc2Filters(input map[*string]*string) []*ec2.Filter {
var filters []*ec2.Filter
for k, v := range input {
filters = append(filters, &ec2.Filter{
Name: k,
Values: []*string{v},
})
}
return filters
}
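// Illustrative usage sketch (not part of this changeset): a filter map taken
// from a *_filter config block becomes the Filters field of a Describe* call.
func describeImagesInputFromFilters(filters map[*string]*string) *ec2.DescribeImagesInput {
	// Each key/value pair turns into one ec2.Filter with a single value.
	return &ec2.DescribeImagesInput{
		Filters: buildEc2Filters(filters),
	}
}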

View File

@ -0,0 +1,58 @@
package common
import (
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
)
var encodedFailureMessagePattern = regexp.MustCompile(`(?i)(.*) Encoded authorization failure message: ([\w-]+) ?( .*)?`)
type stsDecoder interface {
DecodeAuthorizationMessage(input *sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
}
// decodeError replaces encoded authorization messages with the
// decoded results
func decodeAWSError(decoder stsDecoder, err error) error {
groups := encodedFailureMessagePattern.FindStringSubmatch(err.Error())
if groups != nil && len(groups) > 1 {
result, decodeErr := decoder.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
EncodedMessage: aws.String(groups[2]),
})
if decodeErr == nil {
msg := aws.StringValue(result.DecodedMessage)
return fmt.Errorf("%s Authorization failure message: '%s'%s", groups[1], msg, groups[3])
}
log.Printf("[WARN] Attempted to decode authorization message, but received: %v", decodeErr)
}
return err
}
// DecodeAuthZMessages enables automatic decoding of any
// encoded authorization messages
func DecodeAuthZMessages(sess *session.Session) {
azd := &authZMessageDecoder{
Decoder: sts.New(sess),
}
sess.Handlers.UnmarshalError.AfterEachFn = azd.afterEachFn
}
type authZMessageDecoder struct {
Decoder stsDecoder
}
func (a *authZMessageDecoder) afterEachFn(item request.HandlerListRunItem) bool {
if err, ok := item.Request.Error.(awserr.Error); ok && err.Code() == "UnauthorizedOperation" {
item.Request.Error = decodeAWSError(a.Decoder, err)
}
return true
}
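// Illustrative usage sketch (not part of this changeset), assuming the
// aws-sdk-go ec2 package is also imported: a builder opts in once per
// session, and afterwards any UnauthorizedOperation error returned by API
// calls made on that session carries the decoded STS authorization message.
func newDecodingEC2Connection(sess *session.Session) *ec2.EC2 {
	DecodeAuthZMessages(sess)
	return ec2.New(sess)
}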

View File

@ -0,0 +1,70 @@
package common
import (
"fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/aws/awserr"
)
type mockSTS struct {
}
func (m *mockSTS) DecodeAuthorizationMessage(input *sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) {
return &sts.DecodeAuthorizationMessageOutput{
DecodedMessage: aws.String(`{
"allowed": false,
"explicitDeny": true,
"matchedStatements": {}
}`),
}, nil
}
func TestErrorsParsing_RequestFailure(t *testing.T) {
ae := awserr.New("UnauthorizedOperation",
`You are not authorized to perform this operation. Encoded authorization failure message: D9Q7oicjOMr9l2CC-NPP1FiZXK9Ijia1k-3l0siBFCcrK3oSuMFMkBIO5TNj0HdXE-WfwnAcdycFOohfKroNO6toPJEns8RFVfy_M_IjNGmrEFJ6E62pnmBW0OLrMsXxR9FQE4gB4gJzSM0AD6cV6S3FOfqYzWBRX-sQdOT4HryGkFNRoFBr9Xbp-tRwiadwkbdHdfnV9fbRkXmnwCdULml16NBSofC4ZPepLMKmIB5rKjwk-m179UUh2XA-J5no0si6XcRo5GbHQB5QfCIwSHL4vsro2wLZUd16-8OWKyr3tVlTbQe0ERZskqRqRQ5E28QuiBCVV6XstUyo-T4lBSr75Fgnyr3wCO-dS3b_5Ns3WzA2JD4E2AJOAStXIU8IH5YuKkAg7C-dJMuBMPpmKCBEXhNoHDwCyOo5PsV3xMlc0jSb0qYGpfst_TDDtejcZfn7NssUjxVq9qkdH-OXz2gPoQB-hX8ycmZCL5UZwKc3TCLUr7TGnudHjmnMrE9cUo-yTCWfyHPLprhiYhTCKW18EikJ0O1EKI3FJ_b4F19_jFBPARjSwQc7Ut6MNCVzrPdZGYSF6acj5gPaxdy9uSkVQwWXK7Pd5MFP7EBDE1_DgYbzodgwDO2PXeVFUbSLBHKWo_ebZS9ZX2nYPcGss_sYaly0ZVSIJXp7G58B5BoFVhvVH6jYnF9XiAOjMltuP_ycu1pQP1lki500RY3baLvfeYeAsB38XZHKEgWZzq7Fei-uh89q0cjJTmlVyrfRU3q6`,
fmt.Errorf("You can't do it!!"))
rf := awserr.NewRequestFailure(ae, 400, "abc-def-123-456")
result := decodeAWSError(&mockSTS{}, rf)
if result == nil {
t.Error("Expected resulting error")
}
if !strings.Contains(result.Error(), "Authorization failure message:") {
t.Error("Expected authorization failure message")
}
}
func TestErrorsParsing_NonAuthorizationFailure(t *testing.T) {
ae := awserr.New("BadRequest",
`You did something wrong. Try again`,
fmt.Errorf("Request was no good."))
rf := awserr.NewRequestFailure(ae, 400, "abc-def-123-456")
result := decodeAWSError(&mockSTS{}, rf)
if result == nil {
t.Error("Expected resulting error")
}
if result != rf {
t.Error("Expected original error to be returned unchanged")
}
}
func TestErrorsParsing_NonAWSError(t *testing.T) {
err := fmt.Errorf("Random error occurred")
result := decodeAWSError(&mockSTS{}, err)
if result == nil {
t.Error("Expected resulting error")
}
if result != err {
t.Error("Expected original error to be returned unchanged")
}
}

View File

@ -1,6 +1,36 @@
package common
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/multistep"
)
type BuildInfoTemplate struct {
SourceAMI string
BuildRegion string
BuildRegion string
SourceAMI string
SourceAMIName string
SourceAMITags map[string]string
}
func extractBuildInfo(region string, state multistep.StateBag) *BuildInfoTemplate {
rawSourceAMI, hasSourceAMI := state.GetOk("source_image")
if !hasSourceAMI {
return &BuildInfoTemplate{
BuildRegion: region,
}
}
sourceAMI := rawSourceAMI.(*ec2.Image)
sourceAMITags := make(map[string]string, len(sourceAMI.Tags))
for _, tag := range sourceAMI.Tags {
sourceAMITags[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
}
return &BuildInfoTemplate{
BuildRegion: region,
SourceAMI: aws.StringValue(sourceAMI.ImageId),
SourceAMIName: aws.StringValue(sourceAMI.Name),
SourceAMITags: sourceAMITags,
}
}

View File

@ -0,0 +1,63 @@
package common
import (
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/helper/multistep"
)
func testImage() *ec2.Image {
return &ec2.Image{
ImageId: aws.String("ami-abcd1234"),
Name: aws.String("ami_test_name"),
Tags: []*ec2.Tag{
{
Key: aws.String("key-1"),
Value: aws.String("value-1"),
},
{
Key: aws.String("key-2"),
Value: aws.String("value-2"),
},
},
}
}
func testState() multistep.StateBag {
state := new(multistep.BasicStateBag)
return state
}
func TestInterpolateBuildInfo_extractBuildInfo_noSourceImage(t *testing.T) {
state := testState()
buildInfo := extractBuildInfo("foo", state)
expected := BuildInfoTemplate{
BuildRegion: "foo",
}
if !reflect.DeepEqual(*buildInfo, expected) {
t.Fatalf("Unexpected BuildInfoTemplate: expected %#v got %#v\n", expected, *buildInfo)
}
}
func TestInterpolateBuildInfo_extractBuildInfo_withSourceImage(t *testing.T) {
state := testState()
state.Put("source_image", testImage())
buildInfo := extractBuildInfo("foo", state)
expected := BuildInfoTemplate{
BuildRegion: "foo",
SourceAMI: "ami-abcd1234",
SourceAMIName: "ami_test_name",
SourceAMITags: map[string]string{
"key-1": "value-1",
"key-2": "value-2",
},
}
if !reflect.DeepEqual(*buildInfo, expected) {
t.Fatalf("Unexpected BuildInfoTemplate: expected %#v got %#v\n", expected, *buildInfo)
}
}

View File

@ -1,36 +1,58 @@
package common
// aws ec2 describe-regions --query 'Regions[].{Name:RegionName}' --output text | sed 's/\(.*\)/"\1",/' | sort
func listEC2Regions() []string {
return []string{
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
// not part of autogenerated list
"us-gov-west-1",
"cn-north-1",
import (
"fmt"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)
func listEC2Regions(ec2conn ec2iface.EC2API) ([]string, error) {
var regions []string
resultRegions, err := ec2conn.DescribeRegions(nil)
if err != nil {
return []string{}, err
}
for _, region := range resultRegions.Regions {
regions = append(regions, *region.RegionName)
}
return regions, nil
}
// ValidateRegion returns true if the supplied region is a valid AWS
// region and false if it's not.
func ValidateRegion(region string) bool {
for _, valid := range listEC2Regions() {
if region == valid {
return true
// ValidateRegion returns nil if the regions are valid
// and exist; otherwise an error.
// ValidateRegion calls ec2conn.DescribeRegions to get the list of
// regions available to this account.
func (c *AccessConfig) ValidateRegion(regions ...string) error {
ec2conn, err := c.NewEC2Connection()
if err != nil {
return err
}
validRegions, err := listEC2Regions(ec2conn)
if err != nil {
return err
}
var invalidRegions []string
for _, region := range regions {
if region == "" {
continue
}
found := false
for _, validRegion := range validRegions {
if region == validRegion {
found = true
break
}
}
if !found {
invalidRegions = append(invalidRegions, region)
}
}
return false
if len(invalidRegions) > 0 {
return fmt.Errorf("Invalid region(s): %v, available regions: %v", invalidRegions, validRegions)
}
return nil
}
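// Illustrative usage sketch (not part of this changeset): a config Prepare
// step would validate the main region together with any ami_regions entries;
// the region values below are placeholders.
func validateConfiguredRegions(c *AccessConfig) error {
	// DescribeRegions is called once via the access config's EC2 connection;
	// any configured region missing from the account's list is reported.
	return c.ValidateRegion("us-east-1", "eu-west-1")
}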

View File

@ -1,11 +1,13 @@
//go:generate struct-markdown
package common
import (
"errors"
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
"github.com/hashicorp/packer/common/uuid"
@ -25,36 +27,316 @@ func (d *AmiFilterOptions) Empty() bool {
return len(d.Owners) == 0 && len(d.Filters) == 0
}
func (d *AmiFilterOptions) NoOwner() bool {
return len(d.Owners) == 0
}
type SubnetFilterOptions struct {
Filters map[*string]*string
MostFree bool `mapstructure:"most_free"`
Random bool `mapstructure:"random"`
}
func (d *SubnetFilterOptions) Empty() bool {
return len(d.Filters) == 0
}
type VpcFilterOptions struct {
Filters map[*string]*string
}
func (d *VpcFilterOptions) Empty() bool {
return len(d.Filters) == 0
}
type SecurityGroupFilterOptions struct {
Filters map[*string]*string
}
func (d *SecurityGroupFilterOptions) Empty() bool {
return len(d.Filters) == 0
}
// RunConfig contains configuration for running an instance from a source
// AMI and details on how to access that launched image.
type RunConfig struct {
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
AvailabilityZone string `mapstructure:"availability_zone"`
EbsOptimized bool `mapstructure:"ebs_optimized"`
IamInstanceProfile string `mapstructure:"iam_instance_profile"`
InstanceType string `mapstructure:"instance_type"`
RunTags map[string]string `mapstructure:"run_tags"`
SourceAmi string `mapstructure:"source_ami"`
SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter"`
SpotPrice string `mapstructure:"spot_price"`
SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product"`
DisableStopInstance bool `mapstructure:"disable_stop_instance"`
SecurityGroupId string `mapstructure:"security_group_id"`
SecurityGroupIds []string `mapstructure:"security_group_ids"`
TemporarySGSourceCidr string `mapstructure:"temporary_security_group_source_cidr"`
SubnetId string `mapstructure:"subnet_id"`
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
UserData string `mapstructure:"user_data"`
UserDataFile string `mapstructure:"user_data_file"`
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"`
VpcId string `mapstructure:"vpc_id"`
InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"`
// If using a non-default VPC,
// public IP addresses are not provided by default. If this is true, your
// new instance will get a Public IP. default: false
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
// Destination availability zone to launch
// instance in. Leave this empty to allow Amazon to auto-assign.
AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
// Requires spot_price to be set. The
// required duration for the Spot Instances (also known as Spot blocks). This
// value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
// specify an Availability Zone group or a launch group if you specify a
// duration.
BlockDurationMinutes int64 `mapstructure:"block_duration_minutes" required:"false"`
// Packer normally stops the build instance after all provisioners have
// run. For Windows instances, it is sometimes desirable to [run
// Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
// which will stop the instance for you. If this is set to `true`, Packer
// *will not* stop the instance but will assume that you will send the stop
// signal yourself through your final provisioner. You can do this with a
// [windows-shell
// provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
// Note that Packer will still wait for the instance to be stopped, and
// failing to send the stop signal yourself, when you have set this flag to
// `true`, will cause a timeout.
// Example of a valid shutdown command:
//
// ``` json
// {
// "type": "windows-shell",
// "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
// }
// ```
DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
// Mark instance as [EBS
// Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
// Default `false`.
EbsOptimized bool `mapstructure:"ebs_optimized" required:"false"`
// Enabling T2 Unlimited allows the source instance to burst additional CPU
// beyond its available [CPU
// Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
// for as long as the demand exists. This is in contrast to the standard
// configuration that only allows an instance to consume up to its
// available CPU Credits. See the AWS documentation for [T2
// Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
// and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
// Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for
// more information. By default this option is disabled and Packer will set
// up a [T2
// Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
// instance instead.
//
// To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
// Additionally, T2 Unlimited cannot be used in conjunction with Spot
// Instances, e.g. when the `spot_price` option has been configured.
// Attempting to do so will cause an error.
//
// !&gt; **Warning!** Additional costs may be incurred by enabling T2
// Unlimited - even for instances that would usually qualify for the
// [AWS Free Tier](https://aws.amazon.com/free/).
EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
// The name of an [IAM instance
// profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
// to launch the EC2 instance with.
IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
// Automatically terminate instances on
// shutdown in case Packer exits ungracefully. Possible values are stop and
// terminate. Defaults to stop.
InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior" required:"false"`
// The EC2 instance type to use while building the
// AMI, such as t2.small.
InstanceType string `mapstructure:"instance_type" required:"true"`
// Filters used to populate the `security_group_ids` field. Example:
//
// ``` json
// {
// "security_group_filter": {
// "filters": {
// "tag:Class": "packer"
// }
// }
// }
// ```
//
// This selects the SGs with tag `Class` with the value `packer`.
//
// - `filters` (map of strings) - filters used to select a
// `security_group_ids`. Any filter described in the docs for
// [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// is valid.
//
// `security_group_ids` takes precedence over this.
SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
// Tags to apply to the instance that is *launched* to create the
// EBS volumes. This is a [template engine](/docs/templates/engine.html),
// see [Build template data](#build-template-data) for more information.
RunTags map[string]string `mapstructure:"run_tags" required:"false"`
// The ID (not the name) of the security
// group to assign to the instance. By default this is not set and Packer will
// automatically create a new temporary security group to allow SSH access.
// Note that if this is specified, you must be sure the security group allows
// access to the ssh_port given below.
SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
// A list of security groups as
// described above. Note that if this is specified, you must omit the
// security_group_id.
SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"`
// The source AMI whose root volume will be copied and
// provisioned on the currently running instance. This must be an EBS-backed
// AMI with a root volume snapshot that you have access to. Note: this is not
// used when from_scratch is set to true.
SourceAmi string `mapstructure:"source_ami" required:"true"`
// Filters used to populate the `source_ami`
// field. Example:
//
// ``` json
// {
// "source_ami_filter": {
// "filters": {
// "virtualization-type": "hvm",
// "name": "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*",
// "root-device-type": "ebs"
// },
// "owners": ["099720109477"],
// "most_recent": true
// }
// }
// ```
//
// This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
// This will fail unless *exactly* one AMI is returned. In the above example,
// `most_recent` will cause this to succeed by selecting the newest image.
//
// - `filters` (map of strings) - filters used to select a `source_ami`.
// NOTE: This will fail unless *exactly* one AMI is returned. Any filter
// described in the docs for
// [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
// is valid.
//
// - `owners` (array of strings) - Filters the images by their owner. You
// may specify one or more AWS account IDs, "self" (which will use the
// account whose credentials you are using to run Packer), or an AWS owner
// alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
// option is required for security reasons.
//
// - `most_recent` (boolean) - Selects the newest created image when true.
// This is most useful for selecting a daily distro build.
//
// You may set this in place of `source_ami` or in conjunction with it. If you
// set this in conjunction with `source_ami`, the `source_ami` will be added
// to the filter. The provided `source_ami` must meet all of the filtering
// criteria provided in `source_ami_filter`; this pins the AMI returned by the
// filter, but will cause Packer to fail if the `source_ami` does not exist.
SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
// a list of acceptable instance
// types to run your build on. We will request a spot instance using the max
// price of spot_price and the allocation strategy of "lowest price".
// Your instance will be launched on an instance type of the lowest available
// price that you have in your list. This is used in place of instance_type.
// You may only set either spot_instance_types or instance_type, not both.
// This feature exists to help prevent situations where a Packer build fails
// because a particular availability zone does not have capacity for the
// specific instance_type requested in instance_type.
SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false"`
// The maximum hourly price to pay for a spot instance
// to create the AMI. Spot instances are a type of instance that EC2 starts
// when the current spot price is less than the maximum price you specify.
// Spot price will be updated based on available spot instance capacity and
// current spot instance requests. It may save you some costs. You can set
// this to auto for Packer to automatically discover the best spot price or
// to "0" to use an on demand instance (default).
SpotPrice string `mapstructure:"spot_price" required:"false"`
// Required if spot_price is set to
// auto. This tells Packer what sort of AMI you're launching to find the
// best spot price. This must be one of: Linux/UNIX, SUSE Linux,
// Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
// Windows (Amazon VPC)
SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product" required:"false"`
// Requires spot_price to be
// set. This tells Packer to apply tags to the spot request that is issued.
SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
// Filters used to populate the `subnet_id` field.
// Example:
//
// ``` json
// {
// "subnet_filter": {
// "filters": {
// "tag:Class": "build"
// },
// "most_free": true,
// "random": false
// }
// }
// ```
//
// This selects the Subnet with tag `Class` with the value `build`, which has
// the most free IP addresses. NOTE: This will fail unless *exactly* one
// Subnet is returned. By using `most_free` or `random` one will be selected
// from those matching the filter.
//
// - `filters` (map of strings) - filters used to select a `subnet_id`.
// NOTE: This will fail unless *exactly* one Subnet is returned. Any
// filter described in the docs for
// [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
// is valid.
//
// - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
// will be used if multiple Subnets match the filter.
//
// - `random` (boolean) - A random Subnet will be used if multiple Subnets
// match the filter. `most_free` has precedence over this.
//
// `subnet_id` takes precedence over this.
SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
// If using VPC, the ID of the subnet, such as
// subnet-12345def, where Packer will launch the EC2 instance. This field is
// required if you are using a non-default VPC.
SubnetId string `mapstructure:"subnet_id" required:"false"`
// The name of the temporary key pair to
// generate. By default, Packer generates a name that looks like
// `packer_<UUID>`, where <UUID> is a 36-character unique identifier.
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" required:"false"`
// A list of IPv4 CIDR blocks to be authorized access to the instance, when
// packer is creating a temporary security group.
//
// The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only
// used when `security_group_id` or `security_group_ids` is not specified.
TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
// User data to apply when launching the instance. Note
// that you need to be careful about escaping characters due to the templates
// being JSON. It is often more convenient to use user_data_file instead.
// Packer will not automatically wait for a user script to finish before
// shutting down the instance; this must be handled in a provisioner.
UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
// data when launching the instance.
UserDataFile string `mapstructure:"user_data_file" required:"false"`
// Filters used to populate the `vpc_id` field.
// Example:
//
// ``` json
// {
// "vpc_filter": {
// "filters": {
// "tag:Class": "build",
// "isDefault": "false",
// "cidr": "/24"
// }
// }
// }
// ```
//
// This selects the VPC with tag `Class` with the value `build`, which is not
// the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
// unless *exactly* one VPC is returned.
//
// - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
// This will fail unless *exactly* one VPC is returned. Any filter
// described in the docs for
// [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
// is valid.
//
// `vpc_id` takes precedence over this.
VpcFilter VpcFilterOptions `mapstructure:"vpc_filter" required:"false"`
// If launching into a VPC subnet, Packer needs the VPC ID
// in order to create a temporary security group within the VPC. Requires
// subnet_id to be set. If this field is left blank, Packer will try to get
// the VPC ID from the subnet_id.
VpcId string `mapstructure:"vpc_id" required:"false"`
// The timeout for waiting for a Windows
// password for Windows instances. Defaults to 20 minutes. Example value:
// 10m
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
SSHInterface string `mapstructure:"ssh_interface"`
Comm communicator.Config `mapstructure:",squash"`
SSHInterface string `mapstructure:"ssh_interface"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
@ -62,10 +344,10 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
// ssh_private_key_file, then create a temporary one, but only if the
// temporary_key_pair_name has not been provided and we are not using
// ssh_password.
if c.SSHKeyPairName == "" && c.TemporaryKeyPairName == "" &&
c.Comm.SSHPrivateKey == "" && c.Comm.SSHPassword == "" {
if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" {
c.TemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
}
if c.WindowsPasswordTimeout == 0 {
@ -78,44 +360,51 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
// Validation
errs := c.Comm.Prepare(ctx)
if c.SSHPrivateIp && c.SSHInterface != "" {
errs = append(errs, errors.New("ssh_interface and ssh_private_ip should not both be specified"))
}
// Legacy configurable
if c.SSHPrivateIp {
c.SSHInterface = "private_ip"
}
// Valadating ssh_interface
// Validating ssh_interface
if c.SSHInterface != "public_ip" &&
c.SSHInterface != "private_ip" &&
c.SSHInterface != "public_dns" &&
c.SSHInterface != "private_dns" &&
c.SSHInterface != "" {
errs = append(errs, errors.New(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface)))
errs = append(errs, fmt.Errorf("Unknown interface type: %s", c.SSHInterface))
}
if c.SSHKeyPairName != "" {
if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" {
errs = append(errs, errors.New("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
} else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth {
errs = append(errs, errors.New("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
if c.Comm.SSHKeyPairName != "" {
if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKeyFile == "" {
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
} else if c.Comm.SSHPrivateKeyFile == "" && !c.Comm.SSHAgentAuth {
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
}
}
if c.SourceAmi == "" && c.SourceAmiFilter.Empty() {
errs = append(errs, errors.New("A source_ami or source_ami_filter must be specified"))
errs = append(errs, fmt.Errorf("A source_ami or source_ami_filter must be specified"))
}
if c.InstanceType == "" {
errs = append(errs, errors.New("An instance_type must be specified"))
if c.SourceAmi == "" && c.SourceAmiFilter.NoOwner() {
errs = append(errs, fmt.Errorf("For security reasons, your source AMI filter must declare an owner."))
}
if c.SpotPrice == "auto" {
if c.SpotPriceAutoProduct == "" {
errs = append(errs, errors.New(
"spot_price_auto_product must be specified when spot_price is auto"))
if c.InstanceType == "" && len(c.SpotInstanceTypes) == 0 {
errs = append(errs, fmt.Errorf("either instance_type or "+
"spot_instance_types must be specified"))
}
if c.InstanceType != "" && len(c.SpotInstanceTypes) > 0 {
errs = append(errs, fmt.Errorf("either instance_type or "+
"spot_instance_types must be specified, not both"))
}
if c.BlockDurationMinutes%60 != 0 {
errs = append(errs, fmt.Errorf(
"block_duration_minutes must be multiple of 60"))
}
if c.SpotTags != nil {
if c.SpotPrice == "" || c.SpotPrice == "0" {
errs = append(errs, fmt.Errorf(
"spot_tags should not be set when not requesting a spot instance"))
}
}
@ -136,11 +425,13 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
}
}
if c.TemporarySGSourceCidr == "" {
c.TemporarySGSourceCidr = "0.0.0.0/0"
if len(c.TemporarySGSourceCidrs) == 0 {
c.TemporarySGSourceCidrs = []string{"0.0.0.0/0"}
} else {
if _, _, err := net.ParseCIDR(c.TemporarySGSourceCidr); err != nil {
errs = append(errs, fmt.Errorf("Error parsing temporary_security_group_source_cidr: %s", err.Error()))
for _, cidr := range c.TemporarySGSourceCidrs {
if _, _, err := net.ParseCIDR(cidr); err != nil {
errs = append(errs, fmt.Errorf("Error parsing CIDR in temporary_security_group_source_cidrs: %s", err.Error()))
}
}
}
@ -150,6 +441,18 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
errs = append(errs, fmt.Errorf("shutdown_behavior only accepts 'stop' or 'terminate' values."))
}
if c.EnableT2Unlimited {
if c.SpotPrice != "" {
errs = append(errs, fmt.Errorf("Error: T2 Unlimited cannot be used in conjuction with Spot Instances"))
}
firstDotIndex := strings.Index(c.InstanceType, ".")
if firstDotIndex == -1 {
errs = append(errs, fmt.Errorf("Error determining main Instance Type from: %s", c.InstanceType))
} else if c.InstanceType[0:firstDotIndex] != "t2" {
errs = append(errs, fmt.Errorf("Error: T2 Unlimited enabled with a non-T2 Instance Type: %s", c.InstanceType))
}
}
return errs
}

View File

@ -24,7 +24,9 @@ func testConfig() *RunConfig {
InstanceType: "m1.small",
Comm: communicator.Config{
SSHUsername: "foo",
SSH: communicator.SSH{
SSHUsername: "foo",
},
},
}
}
@ -48,22 +50,32 @@ func TestRunConfigPrepare_InstanceType(t *testing.T) {
c := testConfig()
c.InstanceType = ""
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
t.Fatalf("Should error if an instance_type is not specified")
}
}
func TestRunConfigPrepare_SourceAmi(t *testing.T) {
c := testConfig()
c.SourceAmi = ""
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
if err := c.Prepare(nil); len(err) != 2 {
t.Fatalf("Should error if a source_ami (or source_ami_filter) is not specified")
}
}
func TestRunConfigPrepare_SourceAmiFilterBlank(t *testing.T) {
c := testConfigFilter()
if err := c.Prepare(nil); len(err) != 2 {
t.Fatalf("Should error if source_ami_filter is empty or not specified (and source_ami is not specified)")
}
}
func TestRunConfigPrepare_SourceAmiFilterOwnersBlank(t *testing.T) {
c := testConfigFilter()
filter_key := "name"
filter_value := "foo"
c.SourceAmiFilter = AmiFilterOptions{Filters: map[*string]*string{&filter_key: &filter_value}}
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
t.Fatalf("Should error if Owners is not specified)")
}
}
@ -79,14 +91,49 @@ func TestRunConfigPrepare_SourceAmiFilterGood(t *testing.T) {
}
}
func TestRunConfigPrepare_EnableT2UnlimitedGood(t *testing.T) {
c := testConfig()
// Must have a T2 instance type if T2 Unlimited is enabled
c.InstanceType = "t2.micro"
c.EnableT2Unlimited = true
err := c.Prepare(nil)
if len(err) > 0 {
t.Fatalf("err: %s", err)
}
}
func TestRunConfigPrepare_EnableT2UnlimitedBadInstanceType(t *testing.T) {
c := testConfig()
// T2 Unlimited cannot be used with instance types other than T2
c.InstanceType = "m5.large"
c.EnableT2Unlimited = true
err := c.Prepare(nil)
if len(err) != 1 {
t.Fatalf("Should error if T2 Unlimited is enabled with non-T2 instance_type")
}
}
func TestRunConfigPrepare_EnableT2UnlimitedBadWithSpotInstanceRequest(t *testing.T) {
c := testConfig()
// T2 Unlimited cannot be used with Spot Instances
c.InstanceType = "t2.micro"
c.EnableT2Unlimited = true
c.SpotPrice = "auto"
err := c.Prepare(nil)
if len(err) != 1 {
t.Fatalf("Should error if T2 Unlimited has been used in conjuntion with a Spot Price request")
}
}
func TestRunConfigPrepare_SpotAuto(t *testing.T) {
c := testConfig()
c.SpotPrice = "auto"
if err := c.Prepare(nil); len(err) != 1 {
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
c.SpotPriceAutoProduct = "foo"
// Shouldn't error (YET) even though SpotPriceAutoProduct is deprecated
c.SpotPriceAutoProduct = "Linux/Unix"
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
@ -119,12 +166,13 @@ func TestRunConfigPrepare_UserData(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
defer tf.Close()
c.UserData = "foo"
c.UserDataFile = tf.Name()
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
t.Fatalf("Should error if user_data string and user_data_file have both been specified")
}
}
@ -136,13 +184,14 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) {
c.UserDataFile = "idontexistidontthink"
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
t.Fatalf("Should error if the file specified by user_data_file does not exist")
}
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(tf.Name())
defer tf.Close()
c.UserDataFile = tf.Name()
@ -153,27 +202,27 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) {
func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
c := testConfig()
c.TemporaryKeyPairName = ""
c.Comm.SSHTemporaryKeyPairName = ""
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.TemporaryKeyPairName == "" {
if c.Comm.SSHTemporaryKeyPairName == "" {
t.Fatal("keypair name is empty")
}
// Match prefix and UUID, e.g. "packer_5790d491-a0b8-c84c-c9d2-2aea55086550".
r := regexp.MustCompile(`\Apacker_(?:(?i)[a-f\d]{8}(?:-[a-f\d]{4}){3}-[a-f\d]{12}?)\z`)
if !r.MatchString(c.TemporaryKeyPairName) {
if !r.MatchString(c.Comm.SSHTemporaryKeyPairName) {
t.Fatal("keypair name is not valid")
}
c.TemporaryKeyPairName = "ssh-key-123"
c.Comm.SSHTemporaryKeyPairName = "ssh-key-123"
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
if c.TemporaryKeyPairName != "ssh-key-123" {
if c.Comm.SSHTemporaryKeyPairName != "ssh-key-123" {
t.Fatal("keypair name does not match")
}
}

View File

@ -3,15 +3,11 @@ package common
import (
"errors"
"fmt"
"net"
"os"
"log"
"time"
"github.com/aws/aws-sdk-go/service/ec2"
packerssh "github.com/hashicorp/packer/communicator/ssh"
"github.com/mitchellh/multistep"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"github.com/hashicorp/packer/helper/multistep"
)
type ec2Describer interface {
@ -25,12 +21,16 @@ var (
// SSHHost returns a function that can be given to the SSH communicator
// for determining the SSH address based on the instance DNS name.
func SSHHost(e ec2Describer, sshInterface string) func(multistep.StateBag) (string, error) {
func SSHHost(e ec2Describer, sshInterface string, host string) func(multistep.StateBag) (string, error) {
return func(state multistep.StateBag) (string, error) {
if host != "" {
log.Printf("Using ssh_host value: %s", host)
return host, nil
}
const tries = 2
// <= with current structure to check result of describing `tries` times
for j := 0; j <= tries; j++ {
var host string
i := state.Get("instance").(*ec2.Instance)
if sshInterface != "" {
switch sshInterface {
@ -85,56 +85,3 @@ func SSHHost(e ec2Describer, sshInterface string) func(multistep.StateBag) (stri
return "", errors.New("couldn't determine address for instance")
}
}
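// Illustrative usage sketch (not part of this changeset): the communicator's
// host callback prefers an explicit ssh_host-style override and otherwise
// falls back to describing the instance stored in the state bag.
func resolveSSHHost(conn ec2Describer, state multistep.StateBag, sshInterface, hostOverride string) (string, error) {
	hostFn := SSHHost(conn, sshInterface, hostOverride)
	return hostFn(state)
}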
// SSHConfig returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using the private key
// or password.
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
if useAgent {
authSock := os.Getenv("SSH_AUTH_SOCK")
if authSock == "" {
return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
}
sshAgent, err := net.Dial("unix", authSock)
if err != nil {
return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
privateKey, hasKey := state.GetOk("privateKey")
if hasKey {
signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string)))
if err != nil {
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
} else {
return &ssh.ClientConfig{
User: username,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Auth: []ssh.AuthMethod{
ssh.Password(password),
ssh.KeyboardInteractive(
packerssh.PasswordKeyboardInteractive(password)),
}}, nil
}
}
}

View File

@ -5,14 +5,15 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/mitchellh/multistep"
"github.com/hashicorp/packer/helper/multistep"
)
const (
privateIP = "10.0.0.1"
publicIP = "192.168.1.1"
privateDNS = "private.dns.test"
publicDNS = "public.dns.test"
privateIP = "10.0.0.1"
publicIP = "192.168.1.1"
privateDNS = "private.dns.test"
publicDNS = "public.dns.test"
sshHostTemplate = "custom.host.value"
)
func TestSSHHost(t *testing.T) {
@ -25,39 +26,46 @@ func TestSSHHost(t *testing.T) {
vpcId string
sshInterface string
ok bool
wantHost string
ok bool
wantHost string
sshHostOverride string
}{
{1, "", "", true, publicDNS},
{1, "", "private_ip", true, privateIP},
{1, "vpc-id", "", true, publicIP},
{1, "vpc-id", "private_ip", true, privateIP},
{1, "vpc-id", "private_dns", true, privateDNS},
{1, "vpc-id", "public_dns", true, publicDNS},
{1, "vpc-id", "public_ip", true, publicIP},
{2, "", "", true, publicDNS},
{2, "", "private_ip", true, privateIP},
{2, "vpc-id", "", true, publicIP},
{2, "vpc-id", "private_ip", true, privateIP},
{2, "vpc-id", "private_dns", true, privateDNS},
{2, "vpc-id", "public_dns", true, publicDNS},
{2, "vpc-id", "public_ip", true, publicIP},
{3, "", "", false, ""},
{3, "", "private_ip", false, ""},
{3, "vpc-id", "", false, ""},
{3, "vpc-id", "private_ip", false, ""},
{3, "vpc-id", "private_dns", false, ""},
{3, "vpc-id", "public_dns", false, ""},
{3, "vpc-id", "public_ip", false, ""},
{1, "", "", true, publicDNS, ""},
{1, "", "private_ip", true, privateIP, ""},
{1, "vpc-id", "", true, publicIP, ""},
{1, "vpc-id", "private_ip", true, privateIP, ""},
{1, "vpc-id", "private_dns", true, privateDNS, ""},
{1, "vpc-id", "public_dns", true, publicDNS, ""},
{1, "vpc-id", "public_ip", true, publicIP, ""},
{2, "", "", true, publicDNS, ""},
{2, "", "private_ip", true, privateIP, ""},
{2, "vpc-id", "", true, publicIP, ""},
{2, "vpc-id", "private_ip", true, privateIP, ""},
{2, "vpc-id", "private_dns", true, privateDNS, ""},
{2, "vpc-id", "public_dns", true, publicDNS, ""},
{2, "vpc-id", "public_ip", true, publicIP, ""},
{3, "", "", false, "", ""},
{3, "", "private_ip", false, "", ""},
{3, "vpc-id", "", false, "", ""},
{3, "vpc-id", "private_ip", false, "", ""},
{3, "vpc-id", "private_dns", false, "", ""},
{3, "vpc-id", "public_dns", false, "", ""},
{3, "vpc-id", "public_ip", false, "", ""},
{1, "", "", true, sshHostTemplate, sshHostTemplate},
{1, "vpc-id", "", true, sshHostTemplate, sshHostTemplate},
{2, "vpc-id", "private_dns", true, sshHostTemplate, sshHostTemplate},
}
for _, c := range cases {
testSSHHost(t, c.allowTries, c.vpcId, c.sshInterface, c.ok, c.wantHost)
testSSHHost(t, c.allowTries, c.vpcId, c.sshInterface, c.ok, c.wantHost,
c.sshHostOverride)
}
}
func testSSHHost(t *testing.T, allowTries int, vpcId string, sshInterface string, ok bool, wantHost string) {
t.Logf("allowTries=%d vpcId=%s sshInterface=%s ok=%t wantHost=%q", allowTries, vpcId, sshInterface, ok, wantHost)
func testSSHHost(t *testing.T, allowTries int, vpcId string, sshInterface string,
ok bool, wantHost string, sshHostOverride string) {
t.Logf("allowTries=%d vpcId=%s sshInterface=%s ok=%t wantHost=%q sshHostOverride=%s",
allowTries, vpcId, sshInterface, ok, wantHost, sshHostOverride)
e := &fakeEC2Describer{
allowTries: allowTries,
@ -68,7 +76,7 @@ func testSSHHost(t *testing.T, allowTries int, vpcId string, sshInterface string
publicDNS: publicDNS,
}
f := SSHHost(e, sshInterface)
f := SSHHost(e, sshInterface, sshHostOverride)
st := &multistep.BasicStateBag{}
st.Put("instance", &ec2.Instance{
InstanceId: aws.String("instance-id"),

View File

@ -1,18 +1,17 @@
package common
import (
"errors"
"fmt"
"log"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/mitchellh/multistep"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/hashicorp/packer/helper/multistep"
)
// StateRefreshFunc is a function type used for StateChangeConf that is
@ -35,228 +34,333 @@ type StateChangeConf struct {
Target string
}
// AMIStateRefreshFunc returns a StateRefreshFunc that is used to watch
// an AMI for state changes.
func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeImages(&ec2.DescribeImagesInput{
ImageIds: []*string{&imageId},
})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
// Set this to nil as if we didn't find anything.
resp = nil
} else if isTransientNetworkError(err) {
// Transient network error, treat it as if we didn't find anything
resp = nil
} else {
log.Printf("Error on AMIStateRefresh: %s", err)
return nil, "", err
}
}
// Following are wrapper functions that use Packer's environment-variables to
// determine retry logic, then call the AWS SDK's built-in waiters.
if resp == nil || len(resp.Images) == 0 {
// Sometimes AWS has consistency issues and doesn't see the
// AMI. Return an empty state.
return nil, "", nil
}
i := resp.Images[0]
return i, *i.State, nil
func WaitUntilAMIAvailable(ctx aws.Context, conn ec2iface.EC2API, imageId string) error {
imageInput := ec2.DescribeImagesInput{
ImageIds: []*string{&imageId},
}
waitOpts := getWaiterOptions()
if len(waitOpts) == 0 {
// Bump this default to 30 minutes because the aws default
// of ten minutes doesn't work for some of our long-running copies.
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(120))
}
err := conn.WaitUntilImageAvailableWithContext(
ctx,
&imageInput,
waitOpts...)
return err
}
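// Illustrative usage sketch (not part of this changeset): a step that
// registers or copies an image blocks on the waiter before moving on; the
// retry behavior comes from the environment-variable driven waiter options
// described above, falling back to the 30-minute default.
func waitForAMI(ctx aws.Context, conn ec2iface.EC2API, amiID string) error {
	return WaitUntilAMIAvailable(ctx, conn, amiID)
}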
// InstanceStateRefreshFunc returns a StateRefreshFunc that is used to watch
// an EC2 instance.
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{&instanceId},
})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" {
// Set this to nil as if we didn't find anything.
resp = nil
} else if isTransientNetworkError(err) {
// Transient network error, treat it as if we didn't find anything
resp = nil
} else {
log.Printf("Error on InstanceStateRefresh: %s", err)
return nil, "", err
}
}
func WaitUntilInstanceTerminated(ctx aws.Context, conn *ec2.EC2, instanceId string) error {
if resp == nil || len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
// Sometimes AWS just has consistency issues and doesn't see
// our instance yet. Return an empty state.
return nil, "", nil
}
i := resp.Reservations[0].Instances[0]
return i, *i.State.Name, nil
instanceInput := ec2.DescribeInstancesInput{
InstanceIds: []*string{&instanceId},
}
err := conn.WaitUntilInstanceTerminatedWithContext(
ctx,
&instanceInput,
getWaiterOptions()...)
return err
}
// SpotRequestStateRefreshFunc returns a StateRefreshFunc that is used to watch
// a spot request for state changes.
func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
SpotInstanceRequestIds: []*string{&spotRequestId},
})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" {
// Set this to nil as if we didn't find anything.
resp = nil
} else if isTransientNetworkError(err) {
// Transient network error, treat it as if we didn't find anything
resp = nil
} else {
log.Printf("Error on SpotRequestStateRefresh: %s", err)
return nil, "", err
}
}
if resp == nil || len(resp.SpotInstanceRequests) == 0 {
// Sometimes AWS has consistency issues and doesn't see the
// SpotRequest. Return an empty state.
return nil, "", nil
}
i := resp.SpotInstanceRequests[0]
return i, *i.State, nil
// This function works for both requesting and cancelling spot instances.
func WaitUntilSpotRequestFulfilled(ctx aws.Context, conn *ec2.EC2, spotRequestId string) error {
spotRequestInput := ec2.DescribeSpotInstanceRequestsInput{
SpotInstanceRequestIds: []*string{&spotRequestId},
}
err := conn.WaitUntilSpotInstanceRequestFulfilledWithContext(
ctx,
&spotRequestInput,
getWaiterOptions()...)
return err
}
func ImportImageRefreshFunc(conn *ec2.EC2, importTaskId string) StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
ImportTaskIds: []*string{
&importTaskId,
func WaitUntilVolumeAvailable(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
volumeInput := ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeId},
}
err := conn.WaitUntilVolumeAvailableWithContext(
ctx,
&volumeInput,
getWaiterOptions()...)
return err
}
func WaitUntilSnapshotDone(ctx aws.Context, conn *ec2.EC2, snapshotID string) error {
snapInput := ec2.DescribeSnapshotsInput{
SnapshotIds: []*string{&snapshotID},
}
err := conn.WaitUntilSnapshotCompletedWithContext(
ctx,
&snapInput,
getWaiterOptions()...)
return err
}
// Wrappers for our custom AWS waiters
func WaitUntilVolumeAttached(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
volumeInput := ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeId},
}
err := WaitForVolumeToBeAttached(conn,
ctx,
&volumeInput,
getWaiterOptions()...)
return err
}
func WaitUntilVolumeDetached(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
volumeInput := ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeId},
}
err := WaitForVolumeToBeDetached(conn,
ctx,
&volumeInput,
getWaiterOptions()...)
return err
}
func WaitUntilImageImported(ctx aws.Context, conn *ec2.EC2, taskID string) error {
importInput := ec2.DescribeImportImageTasksInput{
ImportTaskIds: []*string{&taskID},
}
err := WaitForImageToBeImported(conn,
ctx,
&importInput,
getWaiterOptions()...)
return err
}
// Custom waiters using AWS's request.Waiter
func WaitForVolumeToBeAttached(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeVolumesInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "DescribeVolumes",
		MaxAttempts: 40,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.PathAllWaiterMatch,
				Argument: "Volumes[].Attachments[].State",
				Expected: "attached",
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *ec2.DescribeVolumesInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeVolumesRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
// WaitForState watches an object and waits for it to achieve a certain
// state.
func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
	log.Printf("Waiting for state to become: %s", conf.Target)

	sleepSeconds := SleepSeconds()
	maxTicks := TimeoutSeconds()/sleepSeconds + 1
	notfoundTick := 0

	for {
		var currentState string
		i, currentState, err = conf.Refresh()
		if err != nil {
			return
		}

		if i == nil {
			// If we didn't find the resource, check if we have been
			// not finding it for awhile, and if so, report an error.
			notfoundTick += 1
			if notfoundTick > maxTicks {
				return nil, errors.New("couldn't find resource")
			}
		} else {
			// Reset the counter for when a resource isn't found
			notfoundTick = 0

			if currentState == conf.Target {
				return
			}

			if conf.StepState != nil {
				if _, ok := conf.StepState.GetOk(multistep.StateCancelled); ok {
					return nil, errors.New("interrupted")
				}
			}

			found := false
			for _, allowed := range conf.Pending {
				if currentState == allowed {
					found = true
					break
				}
			}

			if !found {
				err := fmt.Errorf("unexpected state '%s', wanted target '%s'", currentState, conf.Target)
				return nil, err
			}
		}

		time.Sleep(time.Duration(sleepSeconds) * time.Second)
	}
}

func WaitForVolumeToBeDetached(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeVolumesInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "DescribeVolumes",
		MaxAttempts: 40,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.PathAllWaiterMatch,
				Argument: "length(Volumes[].Attachments[]) == `0`",
				Expected: true,
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *ec2.DescribeVolumesInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeVolumesRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
func isTransientNetworkError(err error) bool {
	if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
		return true
	}

	return false
}

func WaitForImageToBeImported(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeImportImageTasksInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "DescribeImages",
		MaxAttempts: 720,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.PathAllWaiterMatch,
				Argument: "ImportImageTasks[].Status",
				Expected: "completed",
			},
			{
				State:    request.FailureWaiterState,
				Matcher:  request.PathAnyWaiterMatch,
				Argument: "ImportImageTasks[].Status",
				Expected: "deleted",
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *ec2.DescribeImportImageTasksInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeImportImageTasksRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
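
Because these custom waiters accept variadic request.WaiterOption values, a caller can also bypass the environment-variable plumbing and bound a wait explicitly. The sketch below is hypothetical and assumes it sits in this same package; conn and volumeId are placeholders.

// exampleWaitAttachedWithOverride shows a direct call with an explicit
// attempt cap instead of the getWaiterOptions() defaults.
func exampleWaitAttachedWithOverride(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
	input := &ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeId}}
	// 12 attempts at the 5-second constant delay is roughly a one-minute cap.
	return WaitForVolumeToBeAttached(conn, ctx, input,
		request.WithWaiterMaxAttempts(12))
}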
// Returns 300 seconds (5 minutes) by default
// Some AWS operations, like copying an AMI to a distant region, take a very long time
// Allow user to override with AWS_TIMEOUT_SECONDS environment variable
func TimeoutSeconds() (seconds int) {
	seconds = 300

	override := os.Getenv("AWS_TIMEOUT_SECONDS")
	if override != "" {
		n, err := strconv.Atoi(override)
		if err != nil {
			log.Printf("Invalid timeout seconds '%s', using default", override)
		} else {
			seconds = n
		}
	}

	log.Printf("Allowing %ds to complete (change with AWS_TIMEOUT_SECONDS)", seconds)
	return seconds
}

// Returns 2 seconds by default
// AWS async operations sometimes takes long times, if there are multiple parallel builds,
// polling at 2 second frequency will exceed the request limit. Allow 2 seconds to be
// overwritten with AWS_POLL_DELAY_SECONDS
func SleepSeconds() (seconds int) {
	seconds = 2

	override := os.Getenv("AWS_POLL_DELAY_SECONDS")
	if override != "" {
		n, err := strconv.Atoi(override)
		if err != nil {
			log.Printf("Invalid sleep seconds '%s', using default", override)
		} else {
			seconds = n
		}
	}

	log.Printf("Using %ds as polling delay (change with AWS_POLL_DELAY_SECONDS)", seconds)
	return seconds
}

// This helper function uses the environment variables AWS_TIMEOUT_SECONDS and
// AWS_POLL_DELAY_SECONDS to generate waiter options that can be passed into any
// request.Waiter function. These options will control how many times the waiter
// will retry the request, as well as how long to wait between the retries.

// DEFAULTING BEHAVIOR:
// if AWS_POLL_DELAY_SECONDS is set but the others are not, Packer will set this
// poll delay and use the waiter-specific default
// if AWS_TIMEOUT_SECONDS is set but AWS_MAX_ATTEMPTS is not, Packer will use
// AWS_TIMEOUT_SECONDS and _either_ AWS_POLL_DELAY_SECONDS _or_ 2 if the user has not set AWS_POLL_DELAY_SECONDS, to determine a max number of attempts to make.
// if AWS_TIMEOUT_SECONDS, _and_ AWS_MAX_ATTEMPTS are both set,
// AWS_TIMEOUT_SECONDS will be ignored.
// if AWS_MAX_ATTEMPTS is set but AWS_POLL_DELAY_SECONDS is not, then we will
// use waiter-specific defaults.

type envInfo struct {
	envKey     string
	Val        int
	overridden bool
}

type overridableWaitVars struct {
	awsPollDelaySeconds envInfo
	awsMaxAttempts      envInfo
	awsTimeoutSeconds   envInfo
}

func getWaiterOptions() []request.WaiterOption {
	envOverrides := getEnvOverrides()
	waitOpts := applyEnvOverrides(envOverrides)
	return waitOpts
}

func getOverride(varInfo envInfo) envInfo {
	override := os.Getenv(varInfo.envKey)
	if override != "" {
		n, err := strconv.Atoi(override)
		if err != nil {
			log.Printf("Invalid %s '%s', using default", varInfo.envKey, override)
		} else {
			varInfo.overridden = true
			varInfo.Val = n
		}
	}
	return varInfo
}

func getEnvOverrides() overridableWaitVars {
	// Load env vars from environment.
	envValues := overridableWaitVars{
		envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
		envInfo{"AWS_MAX_ATTEMPTS", 0, false},
		envInfo{"AWS_TIMEOUT_SECONDS", 0, false},
	}
	envValues.awsMaxAttempts = getOverride(envValues.awsMaxAttempts)
	envValues.awsPollDelaySeconds = getOverride(envValues.awsPollDelaySeconds)
	envValues.awsTimeoutSeconds = getOverride(envValues.awsTimeoutSeconds)
	return envValues
}
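
A worked example of the defaulting rules described above, using hypothetical values: with only AWS_TIMEOUT_SECONDS=600 overridden, applyEnvOverrides falls back to the 2-second default delay and derives 600/2 = 300 max attempts. The helper below is illustrative only and assumes it sits in this same package.

// exampleTimeoutOnlyOverride mirrors what getEnvOverrides would produce when
// AWS_TIMEOUT_SECONDS=600 and the other two variables are unset.
func exampleTimeoutOnlyOverride() []request.WaiterOption {
	env := overridableWaitVars{
		awsPollDelaySeconds: envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
		awsMaxAttempts:      envInfo{"AWS_MAX_ATTEMPTS", 0, false},
		awsTimeoutSeconds:   envInfo{"AWS_TIMEOUT_SECONDS", 600, true},
	}
	// Expected: WithWaiterDelay(ConstantWaiterDelay(2s)) and
	// WithWaiterMaxAttempts(300).
	return applyEnvOverrides(env)
}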
func LogEnvOverrideWarnings() {
pollDelay := os.Getenv("AWS_POLL_DELAY_SECONDS")
timeoutSeconds := os.Getenv("AWS_TIMEOUT_SECONDS")
maxAttempts := os.Getenv("AWS_MAX_ATTEMPTS")
if maxAttempts != "" && timeoutSeconds != "" {
warning := fmt.Sprintf("[WARNING] (aws): AWS_MAX_ATTEMPTS and " +
"AWS_TIMEOUT_SECONDS are both set. Packer will use " +
"AWS_MAX_ATTEMPTS and discard AWS_TIMEOUT_SECONDS.")
if pollDelay == "" {
warning = fmt.Sprintf("%s Since you have not set the poll delay, "+
"Packer will default to a 2-second delay.", warning)
}
log.Printf(warning)
} else if timeoutSeconds != "" {
log.Printf("[WARNING] (aws): env var AWS_TIMEOUT_SECONDS is " +
"deprecated in favor of AWS_MAX_ATTEMPTS. If you have not " +
"explicitly set AWS_POLL_DELAY_SECONDS, we are defaulting to a " +
"poll delay of 2 seconds, regardless of the AWS waiter's default.")
}
if maxAttempts == "" && timeoutSeconds == "" && pollDelay == "" {
log.Printf("[INFO] (aws): No AWS timeout and polling overrides have been set. " +
"Packer will default to waiter-specific delays and timeouts. If you would " +
"like to customize the length of time between retries and max " +
"number of retries you may do so by setting the environment " +
"variables AWS_POLL_DELAY_SECONDS and AWS_MAX_ATTEMPTS to your " +
"desired values.")
}
}
func applyEnvOverrides(envOverrides overridableWaitVars) []request.WaiterOption {
waitOpts := make([]request.WaiterOption, 0)
// If user has set poll delay seconds, overwrite it. If user has NOT,
// default to a poll delay of 2 seconds
if envOverrides.awsPollDelaySeconds.overridden {
delaySeconds := request.ConstantWaiterDelay(time.Duration(envOverrides.awsPollDelaySeconds.Val) * time.Second)
waitOpts = append(waitOpts, request.WithWaiterDelay(delaySeconds))
}
// If user has set max attempts, overwrite it. If user hasn't set max
// attempts, default to whatever the waiter has set as a default.
if envOverrides.awsMaxAttempts.overridden {
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(envOverrides.awsMaxAttempts.Val))
} else if envOverrides.awsTimeoutSeconds.overridden {
maxAttempts := envOverrides.awsTimeoutSeconds.Val / envOverrides.awsPollDelaySeconds.Val
// override the delay so we can get the timeout right
if !envOverrides.awsPollDelaySeconds.overridden {
delaySeconds := request.ConstantWaiterDelay(time.Duration(envOverrides.awsPollDelaySeconds.Val) * time.Second)
waitOpts = append(waitOpts, request.WithWaiterDelay(delaySeconds))
}
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(maxAttempts))
}
return waitOpts
}

View File

@ -0,0 +1,66 @@
package common
import (
"reflect"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
func TestGetWaiterOptions(t *testing.T) {
// no vars are set
envValues := overridableWaitVars{
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
envInfo{"AWS_MAX_ATTEMPTS", 0, false},
envInfo{"AWS_TIMEOUT_SECONDS", 300, false},
}
options := applyEnvOverrides(envValues)
if len(options) > 0 {
t.Fatalf("Did not expect any waiter options to be generated; actual: %#v", options)
}
// all vars are set
envValues = overridableWaitVars{
envInfo{"AWS_POLL_DELAY_SECONDS", 1, true},
envInfo{"AWS_MAX_ATTEMPTS", 800, true},
envInfo{"AWS_TIMEOUT_SECONDS", 20, true},
}
options = applyEnvOverrides(envValues)
expected := []request.WaiterOption{
request.WithWaiterDelay(request.ConstantWaiterDelay(time.Duration(1) * time.Second)),
request.WithWaiterMaxAttempts(800),
}
if !reflect.DeepEqual(options, expected) {
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
}
// poll delay is not set
envValues = overridableWaitVars{
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
envInfo{"AWS_MAX_ATTEMPTS", 800, true},
envInfo{"AWS_TIMEOUT_SECONDS", 300, false},
}
options = applyEnvOverrides(envValues)
expected = []request.WaiterOption{
request.WithWaiterMaxAttempts(800),
}
if !reflect.DeepEqual(options, expected) {
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
}
// poll delay is not set but timeout seconds is
envValues = overridableWaitVars{
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
envInfo{"AWS_MAX_ATTEMPTS", 0, false},
envInfo{"AWS_TIMEOUT_SECONDS", 20, true},
}
options = applyEnvOverrides(envValues)
expected = []request.WaiterOption{
request.WithWaiterDelay(request.ConstantWaiterDelay(time.Duration(2) * time.Second)),
request.WithWaiterMaxAttempts(10),
}
if !reflect.DeepEqual(options, expected) {
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
}
}

View File

@ -1,58 +1,121 @@
package common
import (
"context"
"fmt"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/mitchellh/multistep"
)
type StepAMIRegionCopy struct {
AccessConfig *AccessConfig
Regions []string
AMIKmsKeyId string
RegionKeyIds map[string]string
EncryptBootVolume bool
EncryptBootVolume config.Trilean // nil means preserve
Name string
OriginalRegion string
toDelete string
getRegionConn func(*AccessConfig, string) (ec2iface.EC2API, error)
AMISkipBuildRegion bool
}
func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(*ec2.EC2)
func (s *StepAMIRegionCopy) DeduplicateRegions(intermediary bool) {
// Deduplicates regions by looping over the list of regions and storing
// the regions as keys in a map. This saves users from accidentally copying
// regions twice if they've added the same region to the list twice.
RegionMap := map[string]bool{}
RegionSlice := []string{}
// Original build region may or may not be present in the Regions list, so
// let's make absolutely sure it's in our map.
RegionMap[s.OriginalRegion] = true
for _, r := range s.Regions {
RegionMap[r] = true
}
if !intermediary || s.AMISkipBuildRegion {
// We don't want to copy back into the original region if we aren't
// using an intermediary image, so remove the original region from our
// map.
// We also don't want to copy back into the original region when the
// intermediary image exists only because we're skipping the build region.
delete(RegionMap, s.OriginalRegion)
}
// Now print all those keys into the region slice again
for k, _ := range RegionMap {
RegionSlice = append(RegionSlice, k)
}
s.Regions = RegionSlice
}
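
An illustrative sketch (hypothetical region values, same package assumed) of what DeduplicateRegions produces: duplicates collapse, and the original build region is dropped when no intermediary image forces a copy back into it.

// exampleDeduplicateRegions is not part of this change; it only demonstrates
// the deduplication behavior described above.
func exampleDeduplicateRegions() []string {
	s := &StepAMIRegionCopy{
		OriginalRegion: "us-east-1",
		Regions:        []string{"us-west-2", "us-west-2", "eu-west-1", "us-east-1"},
	}
	// No intermediary image, so us-east-1 (the build region) is removed and
	// the duplicate us-west-2 collapses, leaving some ordering of
	// {"us-west-2", "eu-west-1"} (map iteration order is not deterministic).
	s.DeduplicateRegions(false)
	return s.Regions
}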
func (s *StepAMIRegionCopy) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
amis := state.Get("amis").(map[string]string)
snapshots := state.Get("snapshots").(map[string][]string)
ami := amis[*ec2conn.Config.Region]
intermediary, _ := state.Get("intermediary_image").(bool)
s.DeduplicateRegions(intermediary)
ami := amis[s.OriginalRegion]
// Make a note to delete the intermediary AMI if necessary.
if intermediary {
s.toDelete = ami
}
if s.EncryptBootVolume.True() {
// encrypt_boot is true, so we have to copy the temporary
// AMI with required encryption setting.
// temp image was created by stepCreateAMI.
if s.RegionKeyIds == nil {
s.RegionKeyIds = make(map[string]string)
}
// Make sure the kms_key_id for the original region is in the map
if _, ok := s.RegionKeyIds[s.OriginalRegion]; !ok {
s.RegionKeyIds[s.OriginalRegion] = s.AMIKmsKeyId
}
}
if len(s.Regions) == 0 {
return multistep.ActionContinue
}
ui.Say(fmt.Sprintf("Copying AMI (%s) to other regions...", ami))
ui.Say(fmt.Sprintf("Copying/Encrypting AMI (%s) to other regions...", ami))
var lock sync.Mutex
var wg sync.WaitGroup
var regKeyID string
errs := new(packer.MultiError)
wg.Add(len(s.Regions))
for _, region := range s.Regions {
if region == *ec2conn.Config.Region {
ui.Message(fmt.Sprintf(
"Avoiding copying AMI to duplicate region %s", region))
continue
}
wg.Add(1)
var regKeyID string
ui.Message(fmt.Sprintf("Copying to: %s", region))
if s.EncryptBootVolume {
if s.EncryptBootVolume.True() {
// Encrypt is true, explicitly
regKeyID = s.RegionKeyIds[region]
} else {
// Encrypt is nil or false; Make sure region key is empty
regKeyID = ""
}
go func(region string) {
defer wg.Done()
id, snapshotIds, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region, regKeyID)
id, snapshotIds, err := s.amiRegionCopy(ctx, state, s.AccessConfig,
s.Name, ami, region, s.OriginalRegion, regKeyID,
s.EncryptBootVolume.ToBoolPointer())
lock.Lock()
defer lock.Unlock()
amis[region] = id
@ -79,35 +142,95 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
}
func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) {
// No cleanup...
ec2conn := state.Get("ec2").(*ec2.EC2)
ui := state.Get("ui").(packer.Ui)
if len(s.toDelete) == 0 {
return
}
// Delete the unencrypted amis and snapshots
ui.Say("Deregistering the AMI and deleting unencrypted temporary " +
"AMIs and snapshots")
resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
ImageIds: []*string{&s.toDelete},
})
if err != nil {
err := fmt.Errorf("Error describing AMI: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return
}
// Deregister image by name.
for _, i := range resp.Images {
_, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
ImageId: i.ImageId,
})
if err != nil {
err := fmt.Errorf("Error deregistering existing AMI: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return
}
ui.Say(fmt.Sprintf("Deregistered AMI id: %s", *i.ImageId))
// Delete snapshot(s) by image
for _, b := range i.BlockDeviceMappings {
if b.Ebs != nil && aws.StringValue(b.Ebs.SnapshotId) != "" {
_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
SnapshotId: b.Ebs.SnapshotId,
})
if err != nil {
err := fmt.Errorf("Error deleting existing snapshot: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return
}
ui.Say(fmt.Sprintf("Deleted snapshot: %s", *b.Ebs.SnapshotId))
}
}
}
}
func getRegionConn(config *AccessConfig, target string) (ec2iface.EC2API, error) {
// Connect to the region where the AMI will be copied to
session, err := config.Session()
if err != nil {
return nil, fmt.Errorf("Error getting region connection for copy: %s", err)
}
regionconn := ec2.New(session.Copy(&aws.Config{
Region: aws.String(target),
}))
return regionconn, nil
}
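
The getRegionConn field on StepAMIRegionCopy appears to exist so this connection factory can be swapped out; a hypothetical in-package test could inject a fake ec2iface.EC2API along these lines (fakeEC2Conn and the AMI id are made-up names, not part of this change).

// fakeEC2Conn embeds the interface and overrides only what the example needs.
type fakeEC2Conn struct {
	ec2iface.EC2API
	copiedImageId string
}

func (f *fakeEC2Conn) CopyImage(in *ec2.CopyImageInput) (*ec2.CopyImageOutput, error) {
	return &ec2.CopyImageOutput{ImageId: &f.copiedImageId}, nil
}

// exampleInjectFakeRegionConn wires the fake in so no real AWS call is made.
func exampleInjectFakeRegionConn() *StepAMIRegionCopy {
	s := &StepAMIRegionCopy{Name: "example-ami"}
	s.getRegionConn = func(*AccessConfig, string) (ec2iface.EC2API, error) {
		return &fakeEC2Conn{copiedImageId: "ami-12345678"}, nil
	}
	return s
}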
// amiRegionCopy does a copy for the given AMI to the target region and
// returns the resulting ID and snapshot IDs, or error.
func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, imageId string,
target string, source string, keyID string) (string, []string, error) {
func (s *StepAMIRegionCopy) amiRegionCopy(ctx context.Context, state multistep.StateBag, config *AccessConfig, name, imageId,
target, source, keyId string, encrypt *bool) (string, []string, error) {
snapshotIds := []string{}
isEncrypted := false
// Connect to the region where the AMI will be copied to
session, err := config.Session()
if s.getRegionConn == nil {
s.getRegionConn = getRegionConn
}
regionconn, err := s.getRegionConn(config, target)
if err != nil {
return "", snapshotIds, err
}
// if we've provided a map of key ids to regions, use those keys.
if len(keyID) > 0 {
isEncrypted = true
}
regionconn := ec2.New(session.Copy(&aws.Config{
Region: aws.String(target)},
))
resp, err := regionconn.CopyImage(&ec2.CopyImageInput{
SourceRegion: &source,
SourceImageId: &imageId,
Name: &name,
Encrypted: aws.Bool(isEncrypted),
KmsKeyId: aws.String(keyID),
Encrypted: encrypt,
KmsKeyId: aws.String(keyId),
})
if err != nil {
@ -115,14 +238,8 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string,
imageId, target, err)
}
stateChange := StateChangeConf{
Pending: []string{"pending"},
Target: "available",
Refresh: AMIStateRefreshFunc(regionconn, *resp.ImageId),
StepState: state,
}
if _, err := WaitForState(&stateChange); err != nil {
// Wait for the image to become ready
if err := WaitUntilAMIAvailable(ctx, regionconn, *resp.ImageId); err != nil {
return "", snapshotIds, fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s",
*resp.ImageId, target, err)
}

Some files were not shown because too many files have changed in this diff.