Compare commits
1 Commits
master
...
revert-659
Author | SHA1 | Date | |
---|---|---|---|
|
e46e2c1469 |
@ -1,228 +0,0 @@
|
|||||||
orbs:
|
|
||||||
win: circleci/windows@1.0.0
|
|
||||||
|
|
||||||
version: 2.1
|
|
||||||
|
|
||||||
executors:
|
|
||||||
golang:
|
|
||||||
docker:
|
|
||||||
- image: docker.mirror.hashicorp.services/circleci/golang:1.16
|
|
||||||
resource_class: medium+
|
|
||||||
darwin:
|
|
||||||
macos:
|
|
||||||
xcode: "12.0.0"
|
|
||||||
|
|
||||||
commands:
|
|
||||||
install-go-run-tests-unix:
|
|
||||||
parameters:
|
|
||||||
GOOS:
|
|
||||||
type: string
|
|
||||||
GOVERSION:
|
|
||||||
type: string
|
|
||||||
HOME:
|
|
||||||
type: string
|
|
||||||
default: "~"
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.<< parameters.GOOS >>-amd64.tar.gz | tar -C << parameters.HOME >>/ -xz
|
|
||||||
- run: << parameters.HOME >>/go/bin/go test ./... -coverprofile=coverage.txt -covermode=atomic
|
|
||||||
install-go-run-tests-windows:
|
|
||||||
parameters:
|
|
||||||
GOVERSION:
|
|
||||||
type: string
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.windows-amd64.zip --output ~/go<< parameters.GOVERSION >>.windows-amd64.zip
|
|
||||||
- run: unzip ~/go<< parameters.GOVERSION >>.windows-amd64.zip -d ~/
|
|
||||||
- run: ~/go/bin/go test ./... -coverprofile=coverage.txt -covermode=atomic
|
|
||||||
build-and-persist-packer-binary:
|
|
||||||
parameters:
|
|
||||||
GOOS:
|
|
||||||
type: string
|
|
||||||
GOARCH:
|
|
||||||
default: "amd64"
|
|
||||||
type: string
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: GOOS=<< parameters.GOOS >> GOARCH=<<parameters.GOARCH>> go build -ldflags="-s -w -X github.com/hashicorp/packer/version.GitCommit=${CIRCLE_SHA1}" -o ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >> .
|
|
||||||
- run: zip ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>.zip ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>
|
|
||||||
- run: rm ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>
|
|
||||||
- persist_to_workspace:
|
|
||||||
root: .
|
|
||||||
paths:
|
|
||||||
- ./pkg/
|
|
||||||
|
|
||||||
# Golang CircleCI 2.0 configuration file
|
|
||||||
#
|
|
||||||
# Check https://circleci.com/docs/2.0/language-go/ for more details
|
|
||||||
jobs:
|
|
||||||
test-linux:
|
|
||||||
executor: golang
|
|
||||||
resource_class: large
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: TESTARGS="-coverprofile=coverage.txt -covermode=atomic" make ci
|
|
||||||
test-darwin:
|
|
||||||
executor: darwin
|
|
||||||
working_directory: ~/go/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- install-go-run-tests-unix:
|
|
||||||
GOOS: darwin
|
|
||||||
GOVERSION: "1.16"
|
|
||||||
test-windows:
|
|
||||||
executor:
|
|
||||||
name: win/vs2019
|
|
||||||
shell: bash.exe
|
|
||||||
steps:
|
|
||||||
- install-go-run-tests-windows:
|
|
||||||
GOVERSION: "1.16"
|
|
||||||
check-lint:
|
|
||||||
executor: golang
|
|
||||||
resource_class: xlarge
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: git fetch --all
|
|
||||||
- run:
|
|
||||||
command: make ci-lint
|
|
||||||
no_output_timeout: 30m
|
|
||||||
check-fmt:
|
|
||||||
executor: golang
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: make fmt-check
|
|
||||||
check-generate:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run: make generate-check
|
|
||||||
build_linux:
|
|
||||||
executor: golang
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: linux
|
|
||||||
build_windows:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: windows
|
|
||||||
build_darwin:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: darwin
|
|
||||||
build_darwin_arm64:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: darwin
|
|
||||||
GOARCH: arm64
|
|
||||||
build_freebsd:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: freebsd
|
|
||||||
build_solaris:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: solaris
|
|
||||||
build_openbsd:
|
|
||||||
executor: golang
|
|
||||||
working_directory: /go/src/github.com/hashicorp/packer
|
|
||||||
steps:
|
|
||||||
- build-and-persist-packer-binary:
|
|
||||||
GOOS: openbsd
|
|
||||||
store_artifacts:
|
|
||||||
executor: golang
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: .
|
|
||||||
- store_artifacts:
|
|
||||||
path: ./pkg/
|
|
||||||
destination: /
|
|
||||||
build-website-docker-image:
|
|
||||||
docker:
|
|
||||||
- image: docker.mirror.hashicorp.services/circleci/buildpack-deps
|
|
||||||
shell: /usr/bin/env bash -euo pipefail -c
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- setup_remote_docker
|
|
||||||
- run:
|
|
||||||
name: Build Docker Image if Necessary
|
|
||||||
command: |
|
|
||||||
IMAGE_TAG=$(cat website/Dockerfile website/package-lock.json | sha256sum | awk '{print $1;}')
|
|
||||||
echo "Using $IMAGE_TAG"
|
|
||||||
if curl https://hub.docker.com/v2/repositories/hashicorp/packer-website/tags/$IMAGE_TAG -fsL > /dev/null; then
|
|
||||||
echo "Dependencies have not changed, not building a new website docker image."
|
|
||||||
else
|
|
||||||
cd website/
|
|
||||||
docker login -u $WEBSITE_DOCKER_USER -p $WEBSITE_DOCKER_PASS
|
|
||||||
docker build -t hashicorp/packer-website:$IMAGE_TAG .
|
|
||||||
docker tag hashicorp/packer-website:$IMAGE_TAG hashicorp/packer-website:latest
|
|
||||||
docker push hashicorp/packer-website
|
|
||||||
fi
|
|
||||||
algolia-index:
|
|
||||||
docker:
|
|
||||||
- image: docker.mirror.hashicorp.services/node:12
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- run:
|
|
||||||
name: Push content to Algolia Index
|
|
||||||
command: |
|
|
||||||
if [ "$CIRCLE_REPOSITORY_URL" != "git@github.com:hashicorp/packer.git" ]; then
|
|
||||||
echo "Not Packer OSS Repo, not indexing Algolia"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
cd website/
|
|
||||||
npm install
|
|
||||||
node scripts/index_search_content.js
|
|
||||||
|
|
||||||
workflows:
|
|
||||||
version: 2
|
|
||||||
test:
|
|
||||||
jobs:
|
|
||||||
- test-linux
|
|
||||||
- test-darwin
|
|
||||||
- test-windows
|
|
||||||
check-code:
|
|
||||||
jobs:
|
|
||||||
- check-lint
|
|
||||||
- check-fmt
|
|
||||||
- check-generate
|
|
||||||
build_packer_binaries:
|
|
||||||
jobs:
|
|
||||||
- build_linux
|
|
||||||
- build_darwin
|
|
||||||
- build_darwin_arm64
|
|
||||||
- build_windows
|
|
||||||
- build_freebsd
|
|
||||||
- build_openbsd
|
|
||||||
- build_solaris
|
|
||||||
- store_artifacts:
|
|
||||||
requires:
|
|
||||||
- build_linux
|
|
||||||
- build_darwin
|
|
||||||
- build_darwin_arm64
|
|
||||||
- build_windows
|
|
||||||
- build_freebsd
|
|
||||||
- build_openbsd
|
|
||||||
- build_solaris
|
|
||||||
website:
|
|
||||||
jobs:
|
|
||||||
- build-website-docker-image:
|
|
||||||
filters:
|
|
||||||
branches:
|
|
||||||
only:
|
|
||||||
- master
|
|
||||||
- algolia-index:
|
|
||||||
filters:
|
|
||||||
branches:
|
|
||||||
only:
|
|
||||||
- stable-website
|
|
9
.gitattributes
vendored
9
.gitattributes
vendored
@ -1,13 +1,4 @@
|
|||||||
* text=auto
|
* text=auto
|
||||||
*.go text eol=lf
|
*.go text eol=lf
|
||||||
*.sh text eol=lf
|
*.sh text eol=lf
|
||||||
*.json text eol=lf
|
|
||||||
*.md text eol=lf
|
|
||||||
*.mdx text eol=lf
|
|
||||||
*.ps1 text eol=lf
|
|
||||||
*.hcl text eol=lf
|
|
||||||
*.tmpl text eol=lf
|
|
||||||
*.txt text eol=lf
|
|
||||||
go.mod text eol=lf
|
|
||||||
go.sum text eol=lf
|
|
||||||
common/test-fixtures/root/* eol=lf
|
common/test-fixtures/root/* eol=lf
|
||||||
|
519
.github/CONTRIBUTING.md
vendored
519
.github/CONTRIBUTING.md
vendored
@ -1,7 +1,7 @@
|
|||||||
# Contributing to Packer
|
# Contributing to Packer
|
||||||
|
|
||||||
**First:** if you're unsure or afraid of _anything_, just ask or submit the
|
**First:** if you're unsure or afraid of _anything_, just ask or submit the
|
||||||
issue or pull request anyway. You won't be yelled at for giving your best
|
issue or pull request anyways. You won't be yelled at for giving your best
|
||||||
effort. The worst that can happen is that you'll be politely asked to change
|
effort. The worst that can happen is that you'll be politely asked to change
|
||||||
something. We appreciate any sort of contributions, and don't want a wall of
|
something. We appreciate any sort of contributions, and don't want a wall of
|
||||||
rules to get in the way of that.
|
rules to get in the way of that.
|
||||||
@ -11,30 +11,24 @@ contribute to the project, read on. This document will cover what we're looking
|
|||||||
for. By addressing all the points we're looking for, it raises the chances we
|
for. By addressing all the points we're looking for, it raises the chances we
|
||||||
can quickly merge or address your contributions.
|
can quickly merge or address your contributions.
|
||||||
|
|
||||||
When contributing in any way to the Packer project (new issue, PR, etc), please
|
|
||||||
be aware that our team identifies with many gender pronouns. Please remember to
|
|
||||||
use nonbinary pronouns (they/them) and gender neutral language ("Hello folks")
|
|
||||||
when addressing our team. For more reading on our code of conduct, please see the
|
|
||||||
[HashiCorp community guidelines](https://www.hashicorp.com/community-guidelines).
|
|
||||||
|
|
||||||
## Issues
|
## Issues
|
||||||
|
|
||||||
### Reporting an Issue
|
### Reporting an Issue
|
||||||
|
|
||||||
- Make sure you test against the latest released version. It is possible we
|
* Make sure you test against the latest released version. It is possible we
|
||||||
already fixed the bug you're experiencing.
|
already fixed the bug you're experiencing.
|
||||||
|
|
||||||
- Run the command with debug output with the environment variable `PACKER_LOG`.
|
* Run the command with debug output with the environment variable `PACKER_LOG`.
|
||||||
For example: `PACKER_LOG=1 packer build template.pkr.hcl`. Take the _entire_
|
For example: `PACKER_LOG=1 packer build template.json`. Take the _entire_
|
||||||
output and create a [gist](https://gist.github.com) for linking to in your
|
output and create a [gist](https://gist.github.com) for linking to in your
|
||||||
issue. Packer should strip sensitive keys from the output, but take a look
|
issue. Packer should strip sensitive keys from the output, but take a look
|
||||||
through just in case.
|
through just in case.
|
||||||
|
|
||||||
- Provide a reproducible test case. If a contributor can't reproduce an issue,
|
* Provide a reproducible test case. If a contributor can't reproduce an issue,
|
||||||
then it dramatically lowers the chances it'll get fixed. And in some cases,
|
then it dramatically lowers the chances it'll get fixed. And in some cases,
|
||||||
the issue will eventually be closed.
|
the issue will eventually be closed.
|
||||||
|
|
||||||
- Respond promptly to any questions made by the Packer team to your issue. Stale
|
* Respond promptly to any questions made by the Packer team to your issue. Stale
|
||||||
issues will be closed.
|
issues will be closed.
|
||||||
|
|
||||||
### Issue Lifecycle
|
### Issue Lifecycle
|
||||||
@ -43,7 +37,7 @@ when addressing our team. For more reading on our code of conduct, please see th
|
|||||||
|
|
||||||
2. The issue is verified and categorized by a Packer collaborator.
|
2. The issue is verified and categorized by a Packer collaborator.
|
||||||
Categorization is done via tags. For example, bugs are marked as "bugs" and
|
Categorization is done via tags. For example, bugs are marked as "bugs" and
|
||||||
simple fixes are marked as "good first issue".
|
easy fixes are marked as "easy".
|
||||||
|
|
||||||
3. Unless it is critical, the issue is left for a period of time (sometimes many
|
3. Unless it is critical, the issue is left for a period of time (sometimes many
|
||||||
weeks), giving outside contributors a chance to address the issue.
|
weeks), giving outside contributors a chance to address the issue.
|
||||||
@ -52,34 +46,23 @@ when addressing our team. For more reading on our code of conduct, please see th
|
|||||||
referenced in the commit message so that the code that fixes it is clearly
|
referenced in the commit message so that the code that fixes it is clearly
|
||||||
linked.
|
linked.
|
||||||
|
|
||||||
5. Sometimes, if you have a specialized environment or use case, the maintainers
|
5. The issue is closed.
|
||||||
may ask for your help testing the patch. You are able to download an
|
|
||||||
experimental binary of Packer containing the Pull Request's patch via from
|
|
||||||
the Pull Request page on github. You can do this by scrolling to the
|
|
||||||
"checks" section on github, and clicking "details" on the
|
|
||||||
"store_artifacts" check. This will take you to Packer's Circle CI page for
|
|
||||||
the build, and you will be able to click a tab named "Artifacts" which will
|
|
||||||
contain zipped Packer binaries for each major OS architecture.
|
|
||||||
|
|
||||||
6. The issue is closed.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Setting up Go
|
## Setting up Go
|
||||||
|
|
||||||
If you have never worked with Go before, you will have to install its
|
If you have never worked with Go before, you will have to install its
|
||||||
runtime in order to build packer.
|
runtime in order to build packer.
|
||||||
|
|
||||||
1. This project always releases from the latest version of golang.
|
1. [Install go](https://golang.org/doc/install#install)
|
||||||
[Install go](https://golang.org/doc/install#install) To properly build from
|
|
||||||
source, you need to have golang >= v1.16
|
|
||||||
|
|
||||||
## Setting up Packer for dev
|
## Setting up Packer for dev
|
||||||
|
|
||||||
If/when you have go installed you can already `go get` packer and `make` in
|
If/when you have go installed you can already `go get` packer and `make` in
|
||||||
order to compile and test Packer. These instructions target
|
order to compile and test Packer. These instructions target
|
||||||
POSIX-like environments (macOS, Linux, Cygwin, etc.) so you may need to
|
POSIX-like environments (Mac OS X, Linux, Cygwin, etc.) so you may need to
|
||||||
adjust them for Windows or other shells.
|
adjust them for Windows or other shells.
|
||||||
|
The instructions below are for go 1.7. or later.
|
||||||
|
|
||||||
|
|
||||||
1. Download the Packer source (and its dependencies) by running
|
1. Download the Packer source (and its dependencies) by running
|
||||||
`go get github.com/hashicorp/packer`. This will download the Packer source to
|
`go get github.com/hashicorp/packer`. This will download the Packer source to
|
||||||
@ -98,29 +81,11 @@ adjust them for Windows or other shells.
|
|||||||
4. After running building Packer successfully, use
|
4. After running building Packer successfully, use
|
||||||
`$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
|
`$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
|
||||||
verify your changes work. For instance:
|
verify your changes work. For instance:
|
||||||
`$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.pkr.hcl`.
|
`$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.
|
||||||
|
|
||||||
5. If everything works well and the tests pass, run `go fmt` on your code before
|
5. If everything works well and the tests pass, run `go fmt` on your code before
|
||||||
submitting a pull-request.
|
submitting a pull-request.
|
||||||
|
|
||||||
### Windows Systems
|
|
||||||
|
|
||||||
On windows systems you need at least the [MinGW Tools](http://www.mingw.org/), e.g. install via [choco](https://chocolatey.org/):
|
|
||||||
|
|
||||||
```
|
|
||||||
choco install mingw -y
|
|
||||||
```
|
|
||||||
|
|
||||||
This installs the GCC compiler, as well as a `mingw32-make` which can be used wherever
|
|
||||||
this documentation mentions `make`
|
|
||||||
|
|
||||||
when building using `go` you also need to mention the windows
|
|
||||||
executable extension
|
|
||||||
|
|
||||||
```
|
|
||||||
go build -o bin/packer.exe
|
|
||||||
```
|
|
||||||
|
|
||||||
### Opening an Pull Request
|
### Opening an Pull Request
|
||||||
|
|
||||||
Thank you for contributing! When you are ready to open a pull-request, you will
|
Thank you for contributing! When you are ready to open a pull-request, you will
|
||||||
@ -142,163 +107,64 @@ From there, open your fork in your browser to open a new pull-request.
|
|||||||
will break if you `git clone` your fork instead of using `go get` on the main
|
will break if you `git clone` your fork instead of using `go get` on the main
|
||||||
Packer project.
|
Packer project.
|
||||||
|
|
||||||
**Note:** See '[Working with
|
|
||||||
forks](https://help.github.com/articles/working-with-forks/)' for a better way
|
|
||||||
to use `git push ...`.
|
|
||||||
|
|
||||||
### Pull Request Lifecycle
|
### Pull Request Lifecycle
|
||||||
|
|
||||||
1. You are welcome to submit your pull request for commentary or review before
|
1. You are welcome to submit your pull request for commentary or review before
|
||||||
it is fully completed. Please prefix the title of your pull request with
|
it is fully completed. Please prefix the title of your pull request with
|
||||||
"[WIP]" to indicate this. It's also a good idea to include specific questions
|
"[WIP]" to indicate this. It's also a good idea to include specific questions
|
||||||
or items you'd like feedback on.
|
or items you'd like feedback on.
|
||||||
|
|
||||||
2. Once you believe your pull request is ready to be merged, you can remove any
|
1. Once you believe your pull request is ready to be merged, you can remove any
|
||||||
"[WIP]" prefix from the title and a core team member will review.
|
"[WIP]" prefix from the title and a core team member will review.
|
||||||
|
|
||||||
3. One of Packer's core team members will look over your contribution and
|
1. One of Packer's core team members will look over your contribution and
|
||||||
either merge, or provide comments letting you know if there is anything left
|
either provide comments letting you know if there is anything left to do. We
|
||||||
to do. We do our best to provide feedback in a timely manner, but it may take
|
do our best to provide feedback in a timely manner, but it may take some time
|
||||||
some time for us to respond. We may also have questions that we need answered
|
for us to respond.
|
||||||
about the code, either because something doesn't make sense to us or because
|
|
||||||
we want to understand your thought process.
|
|
||||||
|
|
||||||
4. If we have requested changes, you can either make those changes or, if you
|
1. Once all outstanding comments and checklist items have been addressed, your
|
||||||
disagree with the suggested changes, we can have a conversation about our
|
contribution will be merged! Merged PRs will be included in the next
|
||||||
reasoning and agree on a path forward. This may be a multi-step process. Our
|
Packer release. The core team takes care of updating the CHANGELOG as they
|
||||||
view is that pull requests are a chance to collaborate, and we welcome
|
merge.
|
||||||
conversations about how to do things better. It is the contributor's
|
|
||||||
responsibility to address any changes requested. While reviewers are happy to
|
|
||||||
give guidance, it is unsustainable for us to perform the coding work necessary
|
|
||||||
to get a PR into a mergeable state.
|
|
||||||
|
|
||||||
5. Once all outstanding comments and checklist items have been addressed, your
|
1. In rare cases, we might decide that a PR should be closed. We'll make sure to
|
||||||
contribution will be merged! Merged PRs will be included in the next
|
provide clear reasoning when this happens.
|
||||||
Packer release. The core team takes care of updating the
|
|
||||||
[CHANGELOG.md](../CHANGELOG.md) as they merge.
|
|
||||||
|
|
||||||
6. In rare cases, we might decide that a PR should be closed without merging.
|
|
||||||
We'll make sure to provide clear reasoning when this happens.
|
|
||||||
|
|
||||||
### Tips for Working on Packer
|
### Tips for Working on Packer
|
||||||
|
|
||||||
#### Getting Your Pull Requests Merged Faster
|
|
||||||
|
|
||||||
It is much easier to review pull requests that are:
|
|
||||||
|
|
||||||
1. Well-documented: Try to explain in the pull request comments what your
|
|
||||||
change does, why you have made the change, and provide instructions for how
|
|
||||||
to produce the new behavior introduced in the pull request. If you can,
|
|
||||||
provide screen captures or terminal output to show what the changes look
|
|
||||||
like. This helps the reviewers understand and test the change.
|
|
||||||
|
|
||||||
2. Small: Try to only make one change per pull request. If you found two bugs
|
|
||||||
and want to fix them both, that's _awesome_, but it's still best to submit
|
|
||||||
the fixes as separate pull requests. This makes it much easier for reviewers
|
|
||||||
to keep in their heads all of the implications of individual code changes,
|
|
||||||
and that means the PR takes less effort and energy to merge. In general, the
|
|
||||||
smaller the pull request, the sooner reviewers will be able to make time to
|
|
||||||
review it.
|
|
||||||
|
|
||||||
3. Passing Tests: Based on how much time we have, we may not review pull
|
|
||||||
requests which aren't passing our tests. (Look below for advice on how to
|
|
||||||
run unit tests). If you need help figuring out why tests are failing, please
|
|
||||||
feel free to ask, but while we're happy to give guidance it is generally
|
|
||||||
your responsibility to make sure that tests are passing. If your pull request
|
|
||||||
changes an interface or invalidates an assumption that causes a bunch of
|
|
||||||
tests to fail, then you need to fix those tests before we can merge your PR.
|
|
||||||
|
|
||||||
If we request changes, try to make those changes in a timely manner. Otherwise,
|
|
||||||
PRs can go stale and be a lot more work for all of us to merge in the future.
|
|
||||||
|
|
||||||
Even with everyone making their best effort to be responsive, it can be
|
|
||||||
time-consuming to get a PR merged. It can be frustrating to deal with
|
|
||||||
the back-and-forth as we make sure that we understand the changes fully. Please
|
|
||||||
bear with us, and please know that we appreciate the time and energy you put
|
|
||||||
into the project.
|
|
||||||
|
|
||||||
#### Working on forks
|
#### Working on forks
|
||||||
|
|
||||||
The easiest way to work on a fork is to set it as a remote of the Packer
|
The easiest way to work on a fork is to set it as a remote of the Packer
|
||||||
project. After following the steps in "Setting up Go to work on Packer":
|
project. After following the steps in "Setting up Go to work on Packer":
|
||||||
|
|
||||||
1. Navigate to the code:
|
1. Navigate to `$GOPATH/src/github.com/hashicorp/packer`
|
||||||
|
2. Add the remote by running
|
||||||
`cd $GOPATH/src/github.com/hashicorp/packer`
|
`git remote add <name of remote> <github url of fork>`. For example:
|
||||||
|
`git remote add mwhooker https://github.com/mwhooker/packer.git`.
|
||||||
2. Add the remote by running:
|
3. Checkout a feature branch: `git checkout -b new-feature`
|
||||||
|
4. Make changes
|
||||||
`git remote add <name of remote> <github url of fork>`
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
`git remote add mwhooker https://github.com/mwhooker/packer.git`
|
|
||||||
|
|
||||||
3. Checkout a feature branch:
|
|
||||||
|
|
||||||
`git checkout -b new-feature`
|
|
||||||
|
|
||||||
4. Make changes.
|
|
||||||
5. (Optional) Push your changes to the fork:
|
5. (Optional) Push your changes to the fork:
|
||||||
|
|
||||||
`git push -u <name of remote> new-feature`
|
`git push -u <name of remote> new-feature`
|
||||||
|
|
||||||
This way you can push to your fork to create a PR, but the code on disk still
|
This way you can push to your fork to create a PR, but the code on disk still
|
||||||
lives in the spot where the go cli tools are expecting to find it.
|
lives in the spot where the go cli tools are expecting to find it.
|
||||||
|
|
||||||
#### Go modules & go vendor
|
#### Govendor
|
||||||
|
|
||||||
If you are submitting a change that requires new or updated dependencies,
|
If you are submitting a change that requires new or updated dependencies, please
|
||||||
please include them in `go.mod`/`go.sum` and in the `vendor/` folder. This
|
include them in `vendor/vendor.json` and in the `vendor/` folder. This helps
|
||||||
helps everything get tested properly in CI.
|
everything get tested properly in CI.
|
||||||
|
|
||||||
Note that you will need to use [go
|
Note that you will need to use [govendor](https://github.com/kardianos/govendor)
|
||||||
mod](https://github.com/golang/go/wiki/Modules) to do this. This step is
|
to do this. This step is recommended but not required; if you don't use govendor
|
||||||
recommended but not required.
|
please indicate in your PR which dependencies have changed and to what versions.
|
||||||
|
|
||||||
Use `go get <project>` to add dependencies to the project and `go mod vendor`
|
Use `govendor fetch <project>` to add dependencies to the project. See
|
||||||
to make vendored copy of dependencies. See [go mod quick
|
[govendor quick start](https://github.com/kardianos/govendor#quick-start-also-see-the-faq)
|
||||||
start](https://github.com/golang/go/wiki/Modules#quick-start) for examples.
|
for examples.
|
||||||
|
|
||||||
Please only apply the minimal vendor changes to get your PR to work. Packer
|
Please only apply the minimal vendor changes to get your PR to work. Packer does
|
||||||
does not attempt to track the latest version for each dependency.
|
not attempt to track the latest version for each dependency.
|
||||||
|
|
||||||
#### Code generation
|
|
||||||
|
|
||||||
Packer relies on `go generate` to generate a [peg parser for boot
|
|
||||||
commands](https://github.com/hashicorp/packer/blob/master/packer-plugin-sdk/bootcommand/boot_command.go),
|
|
||||||
[docs](https://github.com/hashicorp/packer/blob/master/website/pages/partials/builder/amazon/chroot/_Config-not-required.mdx)
|
|
||||||
and HCL2's bridging code. Packer's testing suite will run `make check-generate`
|
|
||||||
to check that all the generated files Packer needs are what they should be.
|
|
||||||
`make generate` re-generates all these file and can take a while depending on
|
|
||||||
your machine's performances. To make it faster it is recommended to run
|
|
||||||
localized code generation. Say you are working on the Amazon builder: running
|
|
||||||
`go generate ./builder/amazon/...` will do that for you. Make sure that the
|
|
||||||
latest code generation tool is installed by running `make install-gen-deps`.
|
|
||||||
|
|
||||||
#### Code linting
|
|
||||||
|
|
||||||
Packer relies on [golangci-lint](https://github.com/golangci/golangci-lint) for linting its Go code base, excluding any generated code created by `go generate`. Linting is executed on new files during Travis builds via `make ci`; the linting of existing code base is only executed when running `make lint`. Linting a large project like Packer is an iterative process so existing code base will have issues that are actively being fixed; pull-requests that fix existing linting issues are always welcomed :smile:.
|
|
||||||
|
|
||||||
The main configuration for golangci-lint is the `.golangci.yml` in the project root. See `golangci-lint --help` for a list of flags that can be used to override the default configuration.
|
|
||||||
|
|
||||||
Run golangci-lint on the entire Packer code base.
|
|
||||||
|
|
||||||
```
|
|
||||||
make lint
|
|
||||||
```
|
|
||||||
|
|
||||||
Run golangci-lint on a single pkg or directory; PKG_NAME expands to /builder/amazon/...
|
|
||||||
|
|
||||||
```
|
|
||||||
make lint PKG_NAME=builder/amazon
|
|
||||||
```
|
|
||||||
|
|
||||||
Note: linting on Travis uses the `--new-from-rev` flag to only lint new files added within a branch or pull-request. To run this check locally you can use the `ci-lint` make target. See [golangci-lint in CI](https://github.com/golangci/golangci-lint#faq) for more information.
|
|
||||||
|
|
||||||
```
|
|
||||||
make ci-lint
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Running Unit Tests
|
#### Running Unit Tests
|
||||||
|
|
||||||
@ -308,15 +174,15 @@ You can run tests for individual packages using commands like this:
|
|||||||
make test TEST=./builder/amazon/...
|
make test TEST=./builder/amazon/...
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Running Builder Acceptance Tests
|
#### Running Acceptance Tests
|
||||||
|
|
||||||
Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
|
Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
|
||||||
for various builders. These typically require an API key (AWS, GCE), or
|
for various builders. These typically require an API key (AWS, GCE), or
|
||||||
additional software to be installed on your computer (VirtualBox, VMware).
|
additional software to be installed on your computer (VirtualBox, VMware).
|
||||||
|
|
||||||
If you're working on a new builder or builder feature and want to verify it is
|
If you're working on a new builder or builder feature and want verify it is
|
||||||
functioning (and also hasn't broken anything else), we recommend creating or
|
functioning (and also hasn't broken anything else), we recommend running the
|
||||||
running the acceptance tests.
|
acceptance tests.
|
||||||
|
|
||||||
**Warning:** The acceptance tests create/destroy/modify _real resources_, which
|
**Warning:** The acceptance tests create/destroy/modify _real resources_, which
|
||||||
may incur costs for real money. In the presence of a bug, it is possible that
|
may incur costs for real money. In the presence of a bug, it is possible that
|
||||||
@ -346,288 +212,3 @@ make testacc TEST=./builder/amazon/ebs TESTARGS="-run TestBuilderAcc_forceDelete
|
|||||||
Acceptance tests typically require other environment variables to be set for
|
Acceptance tests typically require other environment variables to be set for
|
||||||
things such as API tokens and keys. Each test should error and tell you which
|
things such as API tokens and keys. Each test should error and tell you which
|
||||||
credentials are missing, so those are not documented here.
|
credentials are missing, so those are not documented here.
|
||||||
|
|
||||||
#### Running Provisioner Acceptance Tests
|
|
||||||
|
|
||||||
**Warning:** The acceptance tests create/destroy/modify _real resources_, which
|
|
||||||
may incur costs for real money. In the presence of a bug, it is possible that
|
|
||||||
resources may be left behind, which can cost money even though you were not
|
|
||||||
using them. We recommend running tests in an account used only for that purpose
|
|
||||||
so it is easy to see if there are any dangling resources, and so production
|
|
||||||
resources are not accidentally destroyed or overwritten during testing.
|
|
||||||
Also, these typically require an API key (AWS, GCE), or additional software
|
|
||||||
to be installed on your computer (VirtualBox, VMware).
|
|
||||||
|
|
||||||
To run the Provisioners Acceptance Tests you should use the
|
|
||||||
**ACC_TEST_BUILDERS** environment variable to tell the tests which builder the
|
|
||||||
test should be run against.
|
|
||||||
|
|
||||||
Examples of usage:
|
|
||||||
|
|
||||||
- Run the Shell provisioner acceptance tests against the Amazon EBS builder.
|
|
||||||
```
|
|
||||||
ACC_TEST_BUILDERS=amazon-ebs go test ./provisioner/shell/... -v -timeout=1h
|
|
||||||
```
|
|
||||||
- Do the same but using the Makefile
|
|
||||||
```
|
|
||||||
ACC_TEST_BUILDERS=amazon-ebs make provisioners-acctest TEST=./provisioner/shell
|
|
||||||
```
|
|
||||||
- Run all provisioner acceptance tests against the Amazon EBS builder.
|
|
||||||
```
|
|
||||||
ACC_TEST_BUILDERS=amazon-ebs make provisioners-acctest TEST=./...
|
|
||||||
```
|
|
||||||
- Run all provisioner acceptance tests against all builders whenever they are compatible.
|
|
||||||
```
|
|
||||||
ACC_TEST_BUILDERS=all make provisioners-acctest TEST=./...
|
|
||||||
```
|
|
||||||
|
|
||||||
The **ACC_TEST_BUILDERS** env variable accepts a list of builders separated by
|
|
||||||
commas. (e.g. `ACC_TEST_BUILDERS=amazon-ebs,virtualbox-iso`)
|
|
||||||
|
|
||||||
|
|
||||||
#### Writing Provisioner Acceptance Tests
|
|
||||||
|
|
||||||
Packer has implemented a `ProvisionerTestCase` structure to help write
|
|
||||||
provisioner acceptance tests.
|
|
||||||
|
|
||||||
```go
|
|
||||||
type ProvisionerTestCase struct {
|
|
||||||
// Check is called after this step is executed in order to test that
|
|
||||||
// the step executed successfully. If this is not set, then the next
|
|
||||||
// step will be called
|
|
||||||
Check func(*exec.Cmd, string) error
|
|
||||||
// IsCompatible checks whether a provisioner is able to run against a
|
|
||||||
// given builder type and guest operating system, and returns a boolean.
|
|
||||||
// if it returns true, the test combination is okay to run. If false, the
|
|
||||||
// test combination is not okay to run.
|
|
||||||
IsCompatible func(builderType string, BuilderGuestOS string) bool
|
|
||||||
// Name is the name of the test case. Be simple but unique and descriptive.
|
|
||||||
Name string
|
|
||||||
// Setup, if non-nil, will be called once before the test case
|
|
||||||
// runs. This can be used for some setup like setting environment
|
|
||||||
// variables, or for validation prior to the
|
|
||||||
// test running. For example, you can use this to make sure certain
|
|
||||||
// binaries are installed, or text fixtures are in place.
|
|
||||||
Setup func() error
|
|
||||||
// Teardown will be called before the test case is over regardless
|
|
||||||
// of if the test succeeded or failed. This should return an error
|
|
||||||
// in the case that the test can't guarantee all resources were
|
|
||||||
// properly cleaned up.
|
|
||||||
Teardown builderT.TestTeardownFunc
|
|
||||||
// Template is the provisioner template to use.
|
|
||||||
// The provisioner template fragment must be a json-formatted string
|
|
||||||
// containing the provisioner definition but no other portions of a packer
|
|
||||||
// template. For
|
|
||||||
// example:
|
|
||||||
//
|
|
||||||
// ```json
|
|
||||||
// {
|
|
||||||
// "type": "shell-local",
|
|
||||||
// "inline", ["echo hello world"]
|
|
||||||
// }
|
|
||||||
//```
|
|
||||||
//
|
|
||||||
// is a valid entry for "template" here, but the complete Packer template:
|
|
||||||
//
|
|
||||||
// ```json
|
|
||||||
// {
|
|
||||||
// "provisioners": [
|
|
||||||
// {
|
|
||||||
// "type": "shell-local",
|
|
||||||
// "inline", ["echo hello world"]
|
|
||||||
// }
|
|
||||||
// ]
|
|
||||||
// }
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// is invalid as input.
|
|
||||||
//
|
|
||||||
// You may provide multiple provisioners in the same template. For example:
|
|
||||||
// ```json
|
|
||||||
// {
|
|
||||||
// "type": "shell-local",
|
|
||||||
// "inline", ["echo hello world"]
|
|
||||||
// },
|
|
||||||
// {
|
|
||||||
// "type": "shell-local",
|
|
||||||
// "inline", ["echo hello world 2"]
|
|
||||||
// }
|
|
||||||
// ```
|
|
||||||
Template string
|
|
||||||
// Type is the type of provisioner.
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
To start writing a new provisioner acceptance test, you should add a test file
|
|
||||||
named `provisioner_acc_test.go` in the same folder as your provisioner is
|
|
||||||
defined. Create a test case by implementing the above struct, and run it
|
|
||||||
by calling `provisioneracc.TestProvisionersAgainstBuilders(testCase, t)`
|
|
||||||
|
|
||||||
The following example has been adapted from a shell-local provisioner test:
|
|
||||||
|
|
||||||
```
|
|
||||||
import (
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc"
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ...
|
|
||||||
|
|
||||||
func TestAccShellProvisioner_basic(t *testing.T) {
|
|
||||||
// Create a json template fragment containing just the provisioners you want
|
|
||||||
// to run.
|
|
||||||
templateString := `{
|
|
||||||
"type": "shell-local",
|
|
||||||
"script": "test-fixtures/script.sh",
|
|
||||||
"max_retries" : 5
|
|
||||||
}`
|
|
||||||
|
|
||||||
// instantiate a test case.
|
|
||||||
testCase := &provisioneracc.ProvisionerTestCase{
|
|
||||||
IsCompatible: func() bool {return true},
|
|
||||||
Name: "shell-local-provisioner-basic",
|
|
||||||
Teardown: func() error {
|
|
||||||
testutils.CleanupFiles("test-fixtures/file.txt")
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
Template: templateString,
|
|
||||||
Type: "shell-local",
|
|
||||||
Check: func(buildcommand *exec.Cmd, logfile string) error {
|
|
||||||
if buildcommand.ProcessState != nil {
|
|
||||||
if buildcommand.ProcessState.ExitCode() != 0 {
|
|
||||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
filecontents, err := loadFile("file.txt")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !strings.Contains(filecontents, "hello") {
|
|
||||||
return fmt.Errorf("file contents were wrong: %s", filecontents)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
provisioneracc.TestProvisionersAgainstBuilders(testCase, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
After writing the struct and implementing the interface, now is time to write the test that will run all
|
|
||||||
of this code you wrote. Your test should be like:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func TestShellProvisioner(t *testing.T) {
|
|
||||||
acc.TestProvisionersPreCheck("shell", t)
|
|
||||||
acc.TestProvisionersAgainstBuilders(new(ShellProvisionerAccTest), t)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The method `TestProvisionersAgainstBuilders` will run the provisioner against
|
|
||||||
all available and compatible builders. If there are not builders compatible with
|
|
||||||
the test you want to run, you can add a builder using the following steps:
|
|
||||||
|
|
||||||
Create a subdirectory in provisioneracc/test-fixtures for the type of builder
|
|
||||||
you are adding. In this subdirectory, add one json file containing a single
|
|
||||||
builder fragment. For example, one of our amazon-ebs builders is defined in
|
|
||||||
provisioneracc/test-fixtures/amazon-ebs/amazon-ebs.txt and contains:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"type": "amazon-ebs",
|
|
||||||
"ami_name": "packer-acc-test",
|
|
||||||
"instance_type": "t2.micro",
|
|
||||||
"region": "us-east-1",
|
|
||||||
"ssh_username": "ubuntu",
|
|
||||||
"source_ami_filter": {
|
|
||||||
"filters": {
|
|
||||||
"virtualization-type": "hvm",
|
|
||||||
"name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
|
|
||||||
"root-device-type": "ebs"
|
|
||||||
},
|
|
||||||
"owners": ["099720109477"],
|
|
||||||
"most_recent": true
|
|
||||||
},
|
|
||||||
"force_deregister" : true,
|
|
||||||
"tags": {
|
|
||||||
"packer-test": "true"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
note that this fragment does not contain anything other than a single builder
|
|
||||||
definition. The testing framework will combine this with the provisioner
|
|
||||||
fragment to create a working json template.
|
|
||||||
|
|
||||||
In order to tell the testing framework how to use this builder fragment, you
|
|
||||||
need to implement a `BuilderFixture` struct:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type BuilderFixture struct {
|
|
||||||
// Name is the name of the builder fixture.
|
|
||||||
// Be simple and descriptive.
|
|
||||||
Name string
|
|
||||||
// Setup creates necessary extra test fixtures, and renders their values
|
|
||||||
// into the BuilderFixture.Template.
|
|
||||||
Setup func()
|
|
||||||
// Template is the path to a builder template fragment.
|
|
||||||
// The builder template fragment must be a json-formatted file containing
|
|
||||||
// the builder definition but no other portions of a packer template. For
|
|
||||||
// example:
|
|
||||||
//
|
|
||||||
// ```json
|
|
||||||
// {
|
|
||||||
// "type": "null",
|
|
||||||
// "communicator", "none"
|
|
||||||
// }
|
|
||||||
//```
|
|
||||||
//
|
|
||||||
// is a valid entry for "template" here, but the complete Packer template:
|
|
||||||
//
|
|
||||||
// ```json
|
|
||||||
// {
|
|
||||||
// "builders": [
|
|
||||||
// "type": "null",
|
|
||||||
// "communicator": "none"
|
|
||||||
// ]
|
|
||||||
// }
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// is invalid as input.
|
|
||||||
//
|
|
||||||
// Only provide one builder template fragment per file.
|
|
||||||
TemplatePath string
|
|
||||||
|
|
||||||
// GuestOS says what guest os type the builder template fragment creates.
|
|
||||||
// Valid values are "windows", "linux" or "darwin" guests.
|
|
||||||
GuestOS string
|
|
||||||
|
|
||||||
// HostOS says what host os type the builder is capable of running on.
|
|
||||||
// Valid values are "any", windows", or "posix". If you set "posix", then
|
|
||||||
// this builder can run on a "linux" or "darwin" platform. If you set
|
|
||||||
// "any", then this builder can be used on any platform.
|
|
||||||
HostOS string
|
|
||||||
|
|
||||||
Teardown builderT.TestTeardownFunc
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Implement this struct to the file "provisioneracc/builders.go", then add
|
|
||||||
the new implementation to the `BuildersAccTest` map in
|
|
||||||
`provisioneracc/provisioners.go`
|
|
||||||
|
|
||||||
Once you finish these steps, you should be ready to run your new provisioner
|
|
||||||
acceptance test by setting the name used in the BuildersAccTest map as your
|
|
||||||
`ACC_TEST_BUILDERS` environment variable.
|
|
||||||
|
|
||||||
#### Debugging Plugins
|
|
||||||
|
|
||||||
Each packer plugin runs in a separate process and communicates via RPC over a
|
|
||||||
socket therefore using a debugger will not work (be complicated at least).
|
|
||||||
|
|
||||||
But most of the Packer code is really simple and easy to follow with PACKER_LOG
|
|
||||||
turned on. If that doesn't work adding some extra debug print outs when you have
|
|
||||||
homed in on the problem is usually enough.
|
|
||||||
|
26
.github/ISSUE_TEMPLATE.md
vendored
Normal file
26
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
_Please read these instructions before submitting_
|
||||||
|
|
||||||
|
**DELETE THIS TEMPLATE BEFORE SUBMITTING**
|
||||||
|
|
||||||
|
_Only use Github issues to report bugs or feature requests, see
|
||||||
|
https://www.packer.io/community.html_
|
||||||
|
|
||||||
|
For example, _Timeouts waiting for SSH/WinRM_ are generally not bugs within packer and are better addressed by the mailing list. Ask on the mailing list if you are unsure.
|
||||||
|
|
||||||
|
If you are planning to open a pull-request just open the pull-request instead of making an issue first.
|
||||||
|
|
||||||
|
FOR FEATURES:
|
||||||
|
|
||||||
|
Describe the feature you want and your use case _clearly_.
|
||||||
|
|
||||||
|
FOR BUGS:
|
||||||
|
|
||||||
|
Describe the problem and include the following information:
|
||||||
|
|
||||||
|
- Packer version from `packer version`
|
||||||
|
- Host platform
|
||||||
|
- Debug log output from `PACKER_LOG=1 packer build template.json`.
|
||||||
|
Please paste this in a gist https://gist.github.com
|
||||||
|
- The _simplest example template and scripts_ needed to reproduce the bug.
|
||||||
|
Include these in your gist https://gist.github.com
|
||||||
|
|
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
40
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug Report
|
|
||||||
about: You're experiencing an issue with Packer that is different than the documented behavior.
|
|
||||||
labels: bug
|
|
||||||
---
|
|
||||||
|
|
||||||
When filing a bug, please include the following headings if possible. Any
|
|
||||||
example text in this template can be deleted.
|
|
||||||
|
|
||||||
#### Overview of the Issue
|
|
||||||
|
|
||||||
A paragraph or two about the issue you're experiencing.
|
|
||||||
|
|
||||||
#### Reproduction Steps
|
|
||||||
|
|
||||||
Steps to reproduce this issue
|
|
||||||
|
|
||||||
### Packer version
|
|
||||||
|
|
||||||
From `packer version`
|
|
||||||
|
|
||||||
### Simplified Packer Buildfile
|
|
||||||
|
|
||||||
If the file is longer than a few dozen lines, please include the URL to the
|
|
||||||
[gist](https://gist.github.com/) of the log or use the [Github detailed
|
|
||||||
format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d)
|
|
||||||
instead of posting it directly in the issue.
|
|
||||||
|
|
||||||
### Operating system and Environment details
|
|
||||||
|
|
||||||
OS, Architecture, and any other information you can provide about the
|
|
||||||
environment.
|
|
||||||
|
|
||||||
### Log Fragments and crash.log files
|
|
||||||
|
|
||||||
Include appropriate log fragments. If the log is longer than a few dozen lines,
|
|
||||||
please include the URL to the [gist](https://gist.github.com/) of the log or
|
|
||||||
use the [Github detailed format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d) instead of posting it directly in the issue.
|
|
||||||
|
|
||||||
Set the env var `PACKER_LOG=1` for maximum log detail.
|
|
31
.github/ISSUE_TEMPLATE/feature_requests.md
vendored
31
.github/ISSUE_TEMPLATE/feature_requests.md
vendored
@ -1,31 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feature Request
|
|
||||||
about: If you have something you think Packer could improve or add support for.
|
|
||||||
labels: enhancement
|
|
||||||
---
|
|
||||||
|
|
||||||
Please search the existing issues for relevant feature requests, and use the
|
|
||||||
reaction feature
|
|
||||||
(https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)
|
|
||||||
to add upvotes to pre-existing requests.
|
|
||||||
|
|
||||||
#### Community Note
|
|
||||||
|
|
||||||
Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request.
|
|
||||||
Please do not leave "+1" or "me too" comments, they generate extra noise for issue followers and do not help prioritize the request.
|
|
||||||
If you are interested in working on this issue or have submitted a pull request, please leave a comment.
|
|
||||||
|
|
||||||
#### Description
|
|
||||||
|
|
||||||
A written overview of the feature.
|
|
||||||
|
|
||||||
#### Use Case(s)
|
|
||||||
|
|
||||||
Any relevant use-cases that you see.
|
|
||||||
|
|
||||||
#### Potential configuration
|
|
||||||
|
|
||||||
```
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Potential References
|
|
15
.github/ISSUE_TEMPLATE/question.md
vendored
15
.github/ISSUE_TEMPLATE/question.md
vendored
@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
name: Question
|
|
||||||
about: If you have a question, please check out our other community resources instead of opening an issue.
|
|
||||||
labels: question
|
|
||||||
---
|
|
||||||
|
|
||||||
Issues on GitHub are intended to be related to bugs or feature requests, so we
|
|
||||||
recommend using our other community resources instead of asking here if you
|
|
||||||
have a question.
|
|
||||||
|
|
||||||
- Packer Guides: https://www.packer.io/guides
|
|
||||||
- Discussion List: https://groups.google.com/group/packer-tool
|
|
||||||
- Any other questions can be sent to the packer section of the HashiCorp
|
|
||||||
forum: https://discuss.hashicorp.com/c/packer
|
|
||||||
- Packer community links: https://www.packer.io/community
|
|
23
.github/ISSUE_TEMPLATE/ssh_or_winrm_times_out.md
vendored
23
.github/ISSUE_TEMPLATE/ssh_or_winrm_times_out.md
vendored
@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
name: SSH or WinRM times out
|
|
||||||
about: I have a waiting SSH or WinRM error.
|
|
||||||
labels: communicator-question
|
|
||||||
---
|
|
||||||
|
|
||||||
Got one of the following errors ? See if the related guides can help.
|
|
||||||
|
|
||||||
- `Waiting for WinRM to become available` ?
|
|
||||||
|
|
||||||
- See our basic WinRm Packer guide: https://www.packer.io/guides/automatic-operating-system-installs/autounattend_windows
|
|
||||||
|
|
||||||
- `Waiting for SSH to become available` ?
|
|
||||||
|
|
||||||
- See our basic SSH Packer guide: https://www.packer.io/guides/automatic-operating-system-installs/preseed_ubuntu
|
|
||||||
|
|
||||||
Issues on GitHub are intended to be related to bugs or feature requests, so we recommend using our other community resources instead of asking here if you have a question.
|
|
||||||
|
|
||||||
- Packer Guides: https://www.packer.io/guides
|
|
||||||
- Discussion List: https://groups.google.com/group/packer-tool
|
|
||||||
- Any other questions can be sent to the packer section of the HashiCorp
|
|
||||||
forum: https://discuss.hashicorp.com/c/packer
|
|
||||||
- Packer community links: https://www.packer.io/community
|
|
8
.github/PULL_REQUEST_TEMPLATE.md
vendored
8
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -1,16 +1,10 @@
|
|||||||
**DELETE THIS TEMPLATE BEFORE SUBMITTING**
|
**DELETE THIS TEMPLATE BEFORE SUBMITTING**
|
||||||
|
|
||||||
In order to have a good experience with our community, we recommend that you
|
|
||||||
read the contributing guidelines for making a PR, and understand the lifecycle
|
|
||||||
of a Packer PR:
|
|
||||||
|
|
||||||
https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md#opening-an-pull-request
|
|
||||||
|
|
||||||
Describe the change you are making here!
|
Describe the change you are making here!
|
||||||
|
|
||||||
Please include tests. Check out these examples:
|
Please include tests. Check out these examples:
|
||||||
|
|
||||||
- https://github.com/hashicorp/packer/blob/master/builder/parallels/common/ssh_config_test.go#L34
|
- https://github.com/hashicorp/packer/blob/master/builder/virtualbox/common/ssh_config_test.go#L19-L37
|
||||||
- https://github.com/hashicorp/packer/blob/master/post-processor/compress/post-processor_test.go#L153-L182
|
- https://github.com/hashicorp/packer/blob/master/post-processor/compress/post-processor_test.go#L153-L182
|
||||||
|
|
||||||
If your PR resolves any open issue(s), please indicate them like this so they will be closed when your PR is merged:
|
If your PR resolves any open issue(s), please indicate them like this so they will be closed when your PR is merged:
|
||||||
|
5
.github/labeler-issue-triage.yml
vendored
5
.github/labeler-issue-triage.yml
vendored
@ -1,5 +0,0 @@
|
|||||||
bug:
|
|
||||||
- 'panic:'
|
|
||||||
crash:
|
|
||||||
- 'panic:'
|
|
||||||
|
|
102
.github/workflows/check-plugin-docs.js
vendored
102
.github/workflows/check-plugin-docs.js
vendored
@ -1,102 +0,0 @@
|
|||||||
const fs = require("fs");
|
|
||||||
const path = require("path");
|
|
||||||
const fetchPluginDocs = require("../../website/components/remote-plugin-docs/utils/fetch-plugin-docs");
|
|
||||||
|
|
||||||
const COLOR_RESET = "\x1b[0m";
|
|
||||||
const COLOR_GREEN = "\x1b[32m";
|
|
||||||
const COLOR_BLUE = "\x1b[34m";
|
|
||||||
const COLOR_RED = "\x1b[31m";
|
|
||||||
|
|
||||||
async function checkPluginDocs() {
|
|
||||||
const failureMessages = [];
|
|
||||||
const pluginsPath = "website/data/docs-remote-plugins.json";
|
|
||||||
const pluginsFile = fs.readFileSync(path.join(process.cwd(), pluginsPath));
|
|
||||||
const pluginEntries = JSON.parse(pluginsFile);
|
|
||||||
const entriesCount = pluginEntries.length;
|
|
||||||
console.log(`\nResolving plugin docs from ${entriesCount} repositories …`);
|
|
||||||
for (var i = 0; i < entriesCount; i++) {
|
|
||||||
const pluginEntry = pluginEntries[i];
|
|
||||||
const { title, repo, version } = pluginEntry;
|
|
||||||
console.log(`\n${COLOR_BLUE}${repo}${COLOR_RESET} | ${title}`);
|
|
||||||
console.log(`Fetching docs from release "${version}" …`);
|
|
||||||
try {
|
|
||||||
// Validate that all required properties are present
|
|
||||||
const undefinedProps = ["title", "repo", "version", "path"].filter(
|
|
||||||
(key) => typeof pluginEntry[key] == "undefined"
|
|
||||||
);
|
|
||||||
if (undefinedProps.length > 0) {
|
|
||||||
throw new Error(
|
|
||||||
`Failed to validate plugin docs config. Undefined configuration properties ${JSON.stringify(
|
|
||||||
undefinedProps
|
|
||||||
)} found for "${
|
|
||||||
title || pluginEntry.path || repo
|
|
||||||
}". In "website/data/docs-remote-plugins.json", please ensure the missing properties ${JSON.stringify(
|
|
||||||
undefinedProps
|
|
||||||
)} are defined. Additional information on this configuration can be found in "website/README.md".`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
// Validate pluginTier property
|
|
||||||
const { pluginTier } = pluginEntry;
|
|
||||||
if (typeof pluginTier !== "undefined") {
|
|
||||||
const validPluginTiers = ["official", "community"];
|
|
||||||
const isValid = validPluginTiers.indexOf(pluginTier) !== -1;
|
|
||||||
if (!isValid) {
|
|
||||||
throw new Error(
|
|
||||||
`Failed to validate plugin docs config. Invalid pluginTier "${pluginTier}" found for "${
|
|
||||||
title || pluginEntry.path || repo
|
|
||||||
}". In "website/data/docs-remote-plugins.json", the optional pluginTier property must be one of ${JSON.stringify(
|
|
||||||
validPluginTiers
|
|
||||||
)}. The pluginTier property can also be omitted, in which case it will be determined from the plugin repository owner.`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Validate that local zip files are not used in production
|
|
||||||
if (typeof pluginEntry.zipFile !== "undefined") {
|
|
||||||
throw new Error(
|
|
||||||
`Local ZIP file being used for "${
|
|
||||||
title || pluginEntry.path || repo
|
|
||||||
}". The zipFile option should only be used for local development. Please omit the zipFile attribute and ensure the plugin entry points to a remote repository.`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
// Attempt to fetch plugin docs files
|
|
||||||
const docsMdxFiles = await fetchPluginDocs({ repo, tag: version });
|
|
||||||
const mdxFilesByComponent = docsMdxFiles.reduce((acc, mdxFile) => {
|
|
||||||
const componentType = mdxFile.filePath.split("/")[1];
|
|
||||||
if (!acc[componentType]) acc[componentType] = [];
|
|
||||||
acc[componentType].push(mdxFile);
|
|
||||||
return acc;
|
|
||||||
}, {});
|
|
||||||
console.log(`${COLOR_GREEN}Found valid docs:${COLOR_RESET}`);
|
|
||||||
Object.keys(mdxFilesByComponent).forEach((component) => {
|
|
||||||
const componentFiles = mdxFilesByComponent[component];
|
|
||||||
console.log(` ${component}`);
|
|
||||||
componentFiles.forEach(({ filePath }) => {
|
|
||||||
const pathFromComponent = filePath.split("/").slice(2).join("/");
|
|
||||||
console.log(` ├── ${pathFromComponent}`);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
} catch (err) {
|
|
||||||
console.log(`${COLOR_RED}${err}${COLOR_RESET}`);
|
|
||||||
failureMessages.push(`\n${COLOR_RED}× ${repo}: ${COLOR_RESET}${err}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (failureMessages.length === 0) {
|
|
||||||
console.log(
|
|
||||||
`\n---\n\n${COLOR_GREEN}Summary: Successfully resolved all plugin docs.`
|
|
||||||
);
|
|
||||||
pluginEntries.forEach((e) =>
|
|
||||||
console.log(`${COLOR_GREEN}✓ ${e.repo}${COLOR_RESET}`)
|
|
||||||
);
|
|
||||||
console.log("");
|
|
||||||
} else {
|
|
||||||
console.log(
|
|
||||||
`\n---\n\n${COLOR_RED}Summary: Failed to fetch docs for ${failureMessages.length} plugin(s):`
|
|
||||||
);
|
|
||||||
failureMessages.forEach((err) => console.log(err));
|
|
||||||
console.log("");
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
checkPluginDocs();
|
|
29
.github/workflows/check-plugin-docs.yml
vendored
29
.github/workflows/check-plugin-docs.yml
vendored
@ -1,29 +0,0 @@
|
|||||||
#
|
|
||||||
# This GitHub action checks plugin repositories for valid docs.
|
|
||||||
#
|
|
||||||
# This provides a quick assessment on PRs of whether
|
|
||||||
# there might be issues with docs in plugin repositories.
|
|
||||||
#
|
|
||||||
# This is intended to help debug Vercel build issues, which
|
|
||||||
# may or may not be related to docs in plugin repositories.
|
|
||||||
|
|
||||||
name: "website: Check plugin docs"
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "website/**"
|
|
||||||
schedule:
|
|
||||||
- cron: "45 0 * * *"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-plugin-docs:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
- name: Setup Node
|
|
||||||
uses: actions/setup-node@v1
|
|
||||||
- name: Install Dependencies
|
|
||||||
run: npm i isomorphic-unfetch adm-zip gray-matter
|
|
||||||
- name: Fetch and validate plugin docs
|
|
||||||
run: node .github/workflows/check-plugin-docs.js
|
|
17
.github/workflows/issue-comment-created.yml
vendored
17
.github/workflows/issue-comment-created.yml
vendored
@ -1,17 +0,0 @@
|
|||||||
name: Issue Comment Created Triage
|
|
||||||
|
|
||||||
on:
|
|
||||||
issue_comment:
|
|
||||||
types: [created]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
issue_comment_triage:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: actions-ecosystem/action-remove-labels@v1
|
|
||||||
with:
|
|
||||||
github_token: "${{ secrets.GITHUB_TOKEN }}"
|
|
||||||
labels: |
|
|
||||||
stale
|
|
||||||
waiting-reply
|
|
16
.github/workflows/issues-opened.yml
vendored
16
.github/workflows/issues-opened.yml
vendored
@ -1,16 +0,0 @@
|
|||||||
name: Issue Opened Triage
|
|
||||||
|
|
||||||
on:
|
|
||||||
issues:
|
|
||||||
types: [opened]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
issue_triage:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: github/issue-labeler@v2
|
|
||||||
with:
|
|
||||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
|
||||||
configuration-path: .github/labeler-issue-triage.yml
|
|
||||||
|
|
14
.github/workflows/issues.yml
vendored
14
.github/workflows/issues.yml
vendored
@ -1,14 +0,0 @@
|
|||||||
name: Milestone Labeler
|
|
||||||
on:
|
|
||||||
issues:
|
|
||||||
types: [milestoned]
|
|
||||||
jobs:
|
|
||||||
apply_labels:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Add track-internal
|
|
||||||
uses: andymckay/labeler@1.0.2
|
|
||||||
if: github.event.issue.pull_request == null
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.Github_Token }}
|
|
||||||
add-labels: "track-internal"
|
|
37
.github/workflows/linkchecker.yml
vendored
37
.github/workflows/linkchecker.yml
vendored
@ -1,37 +0,0 @@
|
|||||||
on:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'website/**'
|
|
||||||
|
|
||||||
name: Check markdown links on modified website files
|
|
||||||
jobs:
|
|
||||||
vercel-deployment-poll:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 5 #cancel job if no deployment is found within x minutes
|
|
||||||
outputs:
|
|
||||||
url: ${{ steps.waitForVercelPreviewDeployment.outputs.url }}
|
|
||||||
steps:
|
|
||||||
- name: Wait for Vercel preview deployment to be ready
|
|
||||||
uses: nywilken/wait-for-vercel-preview@master
|
|
||||||
id: waitForVercelPreviewDeployment
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
max_timeout: 600 # in seconds, set really high to leverage job timeout-minutes values
|
|
||||||
allow_inactive: true # needed to ensure we get a URL for a previously released deployment
|
|
||||||
markdown-link-check:
|
|
||||||
needs: vercel-deployment-poll
|
|
||||||
if: ${{ needs.vercel-deployment-poll.outputs.url != '' }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Get Deployment URL
|
|
||||||
run:
|
|
||||||
echo "DEPLOYMENT_URL=${{ needs.vercel-deployment-poll.outputs.url }}" >> $GITHUB_ENV
|
|
||||||
- name: Checkout source branch
|
|
||||||
uses: actions/checkout@master
|
|
||||||
- name: Check links
|
|
||||||
uses: gaurav-nelson/github-action-markdown-link-check@v1
|
|
||||||
with:
|
|
||||||
use-quiet-mode: 'yes'
|
|
||||||
file-extension: 'mdx'
|
|
||||||
check-modified-files-only: 'yes'
|
|
||||||
folder-path: 'website/content'
|
|
29
.github/workflows/lock.yml
vendored
29
.github/workflows/lock.yml
vendored
@ -1,29 +0,0 @@
|
|||||||
name: 'Lock Threads'
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '50 1 * * *'
|
|
||||||
|
|
||||||
# Only 50 issues will be handled during a given run.
|
|
||||||
jobs:
|
|
||||||
lock:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: dessant/lock-threads@v2
|
|
||||||
with:
|
|
||||||
github-token: ${{ github.token }}
|
|
||||||
issue-lock-comment: >
|
|
||||||
I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
|
|
||||||
|
|
||||||
If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
|
|
||||||
issue-lock-inactive-days: '30'
|
|
||||||
# Issues older than 180 days ago should be ignored
|
|
||||||
issue-exclude-created-before: '2020-11-01'
|
|
||||||
pr-lock-comment: >
|
|
||||||
I'm going to lock this pull request because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
|
|
||||||
|
|
||||||
If you have found a problem that seems related to this change, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
|
|
||||||
pr-lock-inactive-days: '30'
|
|
||||||
# Issues older than 180 days ago should be ignored
|
|
||||||
pr-exclude-created-before: '2020-11-01'
|
|
||||||
|
|
17
.github/workflows/scheduled-link-checker.yml
vendored
17
.github/workflows/scheduled-link-checker.yml
vendored
@ -1,17 +0,0 @@
|
|||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "45 0 * * *"
|
|
||||||
name: Check Markdown links on main branch
|
|
||||||
jobs:
|
|
||||||
markdown-link-check:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Set deployment URL env
|
|
||||||
run:
|
|
||||||
echo "DEPLOYMENT_URL=https://packer-git-master.hashicorp.vercel.app" >> $GITHUB_ENV
|
|
||||||
- uses: actions/checkout@master
|
|
||||||
- uses: gaurav-nelson/github-action-markdown-link-check@v1
|
|
||||||
with:
|
|
||||||
use-quiet-mode: 'yes'
|
|
||||||
file-extension: 'mdx'
|
|
||||||
folder-path: 'website/content'
|
|
5
.gitignore
vendored
5
.gitignore
vendored
@ -4,7 +4,6 @@
|
|||||||
/src
|
/src
|
||||||
/website/.sass-cache
|
/website/.sass-cache
|
||||||
/website/build
|
/website/build
|
||||||
/website/tmp
|
|
||||||
.DS_Store
|
.DS_Store
|
||||||
.vagrant
|
.vagrant
|
||||||
.idea
|
.idea
|
||||||
@ -13,8 +12,6 @@ test/.env
|
|||||||
*.received.*
|
*.received.*
|
||||||
*.swp
|
*.swp
|
||||||
|
|
||||||
vendor/
|
|
||||||
|
|
||||||
website/.bundle
|
website/.bundle
|
||||||
website/vendor
|
website/vendor
|
||||||
|
|
||||||
@ -28,5 +25,3 @@ packer-test*.log
|
|||||||
Thumbs.db
|
Thumbs.db
|
||||||
/packer.exe
|
/packer.exe
|
||||||
.project
|
.project
|
||||||
cache
|
|
||||||
/.vscode/
|
|
||||||
|
123
.golangci.yml
123
.golangci.yml
@ -1,123 +0,0 @@
|
|||||||
issues:
|
|
||||||
# List of regexps of issue texts to exclude, empty list by default.
|
|
||||||
# But independently from this option we use default exclude patterns,
|
|
||||||
# it can be disabled by `exclude-use-default: false`. To list all
|
|
||||||
# excluded by default patterns execute `golangci-lint run --help`
|
|
||||||
|
|
||||||
exclude-rules:
|
|
||||||
# Exclude gosimple bool check
|
|
||||||
- linters:
|
|
||||||
- gosimple
|
|
||||||
text: "S(1002|1008|1021)"
|
|
||||||
# Exclude failing staticchecks for now
|
|
||||||
- linters:
|
|
||||||
- staticcheck
|
|
||||||
text: "SA(1006|1019|4006|4010|4017|5007|6005|9004):"
|
|
||||||
# Exclude lll issues for long lines with go:generate
|
|
||||||
- linters:
|
|
||||||
- lll
|
|
||||||
source: "^//go:generate "
|
|
||||||
|
|
||||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
|
||||||
max-issues-per-linter: 0
|
|
||||||
|
|
||||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
|
||||||
max-same-issues: 0
|
|
||||||
|
|
||||||
linters:
|
|
||||||
disable-all: true
|
|
||||||
enable:
|
|
||||||
- deadcode
|
|
||||||
- errcheck
|
|
||||||
- goimports
|
|
||||||
- gosimple
|
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- staticcheck
|
|
||||||
- unconvert
|
|
||||||
- unused
|
|
||||||
- varcheck
|
|
||||||
fast: true
|
|
||||||
|
|
||||||
# options for analysis running
|
|
||||||
run:
|
|
||||||
# default concurrency is a available CPU number
|
|
||||||
concurrency: 4
|
|
||||||
|
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
|
||||||
timeout: 10m
|
|
||||||
|
|
||||||
# exit code when at least one issue was found, default is 1
|
|
||||||
issues-exit-code: 1
|
|
||||||
|
|
||||||
# include test files or not, default is true
|
|
||||||
tests: true
|
|
||||||
|
|
||||||
# list of build tags, all linters use it. Default is empty list.
|
|
||||||
#build-tags:
|
|
||||||
# - mytag
|
|
||||||
|
|
||||||
# which dirs to skip: issues from them won't be reported;
|
|
||||||
# can use regexp here: generated.*, regexp is applied on full path;
|
|
||||||
# default value is empty list, but default dirs are skipped independently
|
|
||||||
# from this option's value (see skip-dirs-use-default).
|
|
||||||
#skip-dirs:
|
|
||||||
# - src/external_libs
|
|
||||||
# - autogenerated_by_my_lib
|
|
||||||
|
|
||||||
# default is true. Enables skipping of directories:
|
|
||||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
|
||||||
skip-dirs-use-default: true
|
|
||||||
|
|
||||||
# which files to skip: they will be analyzed, but issues from them
|
|
||||||
# won't be reported. Default value is empty list, but there is
|
|
||||||
# no need to include all autogenerated files, we confidently recognize
|
|
||||||
# autogenerated files. If it's not please let us know.
|
|
||||||
skip-files:
|
|
||||||
- ".*\\.hcl2spec\\.go$"
|
|
||||||
# - lib/bad.go
|
|
||||||
|
|
||||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
|
||||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
|
||||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
|
||||||
# to go.mod are needed. This setting is most useful to check that go.mod does
|
|
||||||
# not need updates, such as in a continuous integration and testing system.
|
|
||||||
# If invoked with -mod=vendor, the go command assumes that the vendor
|
|
||||||
# directory holds the correct copies of dependencies and ignores
|
|
||||||
# the dependency descriptions in go.mod.
|
|
||||||
modules-download-mode: readonly
|
|
||||||
|
|
||||||
# output configuration options
|
|
||||||
output:
|
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
|
||||||
format: colored-line-number
|
|
||||||
|
|
||||||
# print lines of code with issue, default is true
|
|
||||||
print-issued-lines: true
|
|
||||||
|
|
||||||
# print linter name in the end of issue text, default is true
|
|
||||||
print-linter-name: true
|
|
||||||
|
|
||||||
# make issues output unique by line, default is true
|
|
||||||
uniq-by-line: true
|
|
||||||
|
|
||||||
|
|
||||||
# all available settings of specific linters
|
|
||||||
linters-settings:
|
|
||||||
errcheck:
|
|
||||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
|
||||||
# default is false: such cases aren't reported by default.
|
|
||||||
check-type-assertions: false
|
|
||||||
|
|
||||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
|
||||||
# default is false: such cases aren't reported by default.
|
|
||||||
check-blank: false
|
|
||||||
|
|
||||||
# [deprecated] comma-separated list of pairs of the form pkg:regex
|
|
||||||
# the regex is used to ignore names within pkg. (default "fmt:.*").
|
|
||||||
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
|
|
||||||
ignore: fmt:.*,io/ioutil:^Read.*,io:Close
|
|
||||||
|
|
||||||
# path to a file containing a list of functions to exclude from checking
|
|
||||||
# see https://github.com/kisielk/errcheck#excluding-functions for details
|
|
||||||
#exclude: /path/to/file.txt
|
|
@ -1,17 +0,0 @@
|
|||||||
poll "label_issue_migrater" "remote_plugin_migrater" {
|
|
||||||
schedule = "0 20 * * * *"
|
|
||||||
new_owner = "hashicorp"
|
|
||||||
repo_prefix = "packer-plugin-"
|
|
||||||
label_prefix = "remote-plugin/"
|
|
||||||
excluded_label_prefixes = ["communicator/"]
|
|
||||||
excluded_labels = ["build", "core", "new-plugin-contribution", "website"]
|
|
||||||
|
|
||||||
issue_header = <<-EOF
|
|
||||||
_This issue was originally opened by @${var.user} as ${var.repository}#${var.issue_number}. It was migrated here as a result of the [Packer plugin split](https://github.com/hashicorp/packer/issues/8610#issuecomment-770034737). The original body of the issue is below._
|
|
||||||
|
|
||||||
<hr>
|
|
||||||
|
|
||||||
EOF
|
|
||||||
migrated_comment = "This issue has been automatically migrated to ${var.repository}#${var.issue_number} because it looks like an issue with that plugin. If you believe this is _not_ an issue with the plugin, please reply to ${var.repository}#${var.issue_number}."
|
|
||||||
}
|
|
||||||
|
|
25
.travis.yml
Normal file
25
.travis.yml
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
env:
|
||||||
|
- USER=travis
|
||||||
|
|
||||||
|
sudo: false
|
||||||
|
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.11.x
|
||||||
|
- master
|
||||||
|
|
||||||
|
install:
|
||||||
|
- make deps
|
||||||
|
|
||||||
|
script:
|
||||||
|
- GOMAXPROCS=2 make ci
|
||||||
|
|
||||||
|
branches:
|
||||||
|
only:
|
||||||
|
- master
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: master
|
||||||
|
fast_finish: true
|
2040
CHANGELOG.md
2040
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
70
CODEOWNERS
70
CODEOWNERS
@ -2,71 +2,27 @@
|
|||||||
|
|
||||||
# builders
|
# builders
|
||||||
|
|
||||||
/examples/alicloud/ @chhaj5236 @alexyueer
|
/builder/alicloud/ dongxiao.zzh@alibaba-inc.com
|
||||||
/builder/alicloud/ @chhaj5236 @alexyueer
|
/builder/amazon/ebssurrogate/ @jen20
|
||||||
/website/pages/docs/builders/alicloud* @chhaj5236 @alexyueer
|
/builder/amazon/ebsvolume/ @jen20
|
||||||
|
/builder/azure/ @boumenot
|
||||||
/examples/azure/ @paulmey
|
/builder/hyperv/ @taliesins
|
||||||
/builder/azure/ @paulmey
|
/builder/lxc/ @ChrisLundquist
|
||||||
/website/pages/docs/builders/azure* @paulmey
|
/builder/lxd/ @ChrisLundquist
|
||||||
|
/builder/oneandone/ @jasmingacic
|
||||||
/builder/digitalocean/ @andrewsomething
|
/builder/oracle/ @prydie @owainlewis
|
||||||
/website/pages/docs/builders/digitalocean* @andrewsomething
|
/builder/profitbricks/ @jasmingacic
|
||||||
|
/builder/triton/ @jen20 @sean-
|
||||||
/examples/jdcloud/ @XiaohanLiang @remrain
|
/builder/ncloud/ @YuSungDuk
|
||||||
/builder/jdcloud/ @XiaohanLiang @remrain
|
/builder/scaleway/ @dimtion @edouardb
|
||||||
/website/pages/docs/builders/jdcloud* @XiaohanLiang @remrain
|
|
||||||
|
|
||||||
/builder/linode/ @stvnjacobs @charliekenney23 @phillc
|
|
||||||
/website/pages/docs/builders/linode* @stvnjacobs @charliekenney23 @phillc
|
|
||||||
|
|
||||||
/builder/lxc/ @ChrisLundquist
|
|
||||||
/website/pages/docs/builders/lxc* @ChrisLundquist
|
|
||||||
/test/fixtures/builder-lxc/ @ChrisLundquist
|
|
||||||
/test/builder_lxc* @ChrisLundquist
|
|
||||||
|
|
||||||
/builder/lxd/ @ChrisLundquist
|
|
||||||
/website/pages/docs/builders/lxd* @ChrisLundquist
|
|
||||||
|
|
||||||
/builder/oneandone/ @jasmingacic
|
|
||||||
/website/pages/docs/builders/oneandone* @jasmingacic
|
|
||||||
|
|
||||||
/builder/oracle/ @prydie @owainlewis
|
|
||||||
/website/pages/docs/builders/oracle* @prydie @owainlewis
|
|
||||||
|
|
||||||
/builder/profitbricks/ @LiviusP @mflorin
|
|
||||||
/website/pages/docs/builders/profitbricks* @LiviusP @mflorin
|
|
||||||
|
|
||||||
/builder/triton/ @sean-
|
|
||||||
/website/pages/docs/builders/triton* @sean-
|
|
||||||
|
|
||||||
/builder/proxmox/ @carlpett
|
|
||||||
/website/pages/docs/builders/proxmox* @carlpett
|
|
||||||
|
|
||||||
/builder/scaleway/ @scaleway/devtools
|
|
||||||
/website/pages/docs/builders/scaleway* @scaleway/devtools
|
|
||||||
|
|
||||||
/builder/hcloud/ @LKaemmerling
|
|
||||||
/website/pages/docs/builders/hcloud* @LKaemmerling
|
|
||||||
|
|
||||||
/builder/yandex/ @GennadySpb @alexanderKhaustov @seukyaso
|
|
||||||
/website/pages/docs/builders/yandex* @GennadySpb @alexanderKhaustov @seukyaso
|
|
||||||
|
|
||||||
/examples/tencentcloud/ @likexian
|
|
||||||
/builder/tencentcloud/ @likexian
|
|
||||||
/website/pages/docs/builders/tencentcloud* @likexian
|
|
||||||
|
|
||||||
# provisioners
|
# provisioners
|
||||||
|
|
||||||
/examples/ansible/ @bhcleek
|
|
||||||
/provisioner/ansible/ @bhcleek
|
/provisioner/ansible/ @bhcleek
|
||||||
/provisioner/converge/ @stevendborrelli
|
/provisioner/converge/ @stevendborrelli
|
||||||
|
|
||||||
# post-processors
|
# post-processors
|
||||||
|
|
||||||
/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
|
/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
|
||||||
/post-processor/checksum/ v.tolstov@selfip.ru
|
/post-processor/checksum/ v.tolstov@selfip.ru
|
||||||
/post-processor/googlecompute-export/ crunkleton@google.com
|
/post-processor/googlecompute-export/ crunkleton@google.com
|
||||||
/post-processor/yandex-export/ @GennadySpb
|
|
||||||
/post-processor/yandex-import/ @GennadySpb
|
|
||||||
/post-processor/vsphere-template/ nelson@bennu.cl
|
/post-processor/vsphere-template/ nelson@bennu.cl
|
||||||
|
30
Dockerfile
30
Dockerfile
@ -1,30 +0,0 @@
|
|||||||
FROM docker.mirror.hashicorp.services/ubuntu:16.04
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y \
|
|
||||||
locales \
|
|
||||||
openssh-server \
|
|
||||||
sudo
|
|
||||||
|
|
||||||
RUN locale-gen en_US.UTF-8
|
|
||||||
|
|
||||||
RUN if ! getent passwd vagrant; then useradd -d /home/vagrant -m -s /bin/bash vagrant; fi \
|
|
||||||
&& echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \
|
|
||||||
&& mkdir -p /etc/sudoers.d \
|
|
||||||
&& echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers.d/vagrant \
|
|
||||||
&& chmod 0440 /etc/sudoers.d/vagrant
|
|
||||||
|
|
||||||
RUN mkdir -p /home/vagrant/.ssh \
|
|
||||||
&& chmod 0700 /home/vagrant/.ssh \
|
|
||||||
&& wget --no-check-certificate \
|
|
||||||
https://raw.github.com/hashicorp/vagrant/master/keys/vagrant.pub \
|
|
||||||
-O /home/vagrant/.ssh/authorized_keys \
|
|
||||||
&& chmod 0600 /home/vagrant/.ssh/authorized_keys \
|
|
||||||
&& chown -R vagrant /home/vagrant/.ssh
|
|
||||||
|
|
||||||
RUN mkdir -p /run/sshd
|
|
||||||
|
|
||||||
CMD /usr/sbin/sshd -D \
|
|
||||||
-o UseDNS=no \
|
|
||||||
-o PidFile=/tmp/sshd.pid
|
|
151
Makefile
151
Makefile
@ -1,9 +1,5 @@
|
|||||||
TEST?=$(shell go list ./...)
|
TEST?=$(shell go list ./... | grep -v vendor)
|
||||||
COUNT?=1
|
VET?=$(shell ls -d */ | grep -v vendor | grep -v website)
|
||||||
VET?=$(shell go list ./...)
|
|
||||||
|
|
||||||
ACC_TEST_BUILDERS?=all
|
|
||||||
ACC_TEST_PROVISIONERS?=all
|
|
||||||
# Get the current full sha from git
|
# Get the current full sha from git
|
||||||
GITSHA:=$(shell git rev-parse HEAD)
|
GITSHA:=$(shell git rev-parse HEAD)
|
||||||
# Get the current local branch name from git (if we can, this may be blank)
|
# Get the current local branch name from git (if we can, this may be blank)
|
||||||
@ -12,60 +8,48 @@ GOOS=$(shell go env GOOS)
|
|||||||
GOARCH=$(shell go env GOARCH)
|
GOARCH=$(shell go env GOARCH)
|
||||||
GOPATH=$(shell go env GOPATH)
|
GOPATH=$(shell go env GOPATH)
|
||||||
|
|
||||||
EXECUTABLE_FILES=$(shell find . -type f -executable | egrep -v '^\./(website/[vendor|tmp]|vendor/|\.git|bin/|scripts/|pkg/)' | egrep -v '.*(\.sh|\.bats|\.git)' | egrep -v './provisioner/(ansible|inspec)/test-fixtures/exit1')
|
# gofmt
|
||||||
|
UNFORMATTED_FILES=$(shell find . -not -path "./vendor/*" -name "*.go" | xargs gofmt -s -l)
|
||||||
|
|
||||||
# Get the git commit
|
# Get the git commit
|
||||||
GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
|
GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
|
||||||
GIT_COMMIT=$(shell git rev-parse --short HEAD)
|
GIT_COMMIT=$(shell git rev-parse --short HEAD)
|
||||||
GIT_IMPORT=github.com/hashicorp/packer/version
|
GIT_IMPORT=github.com/hashicorp/packer/version
|
||||||
UNAME_S := $(shell uname -s)
|
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)
|
||||||
LDFLAGS=-s -w
|
|
||||||
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) $(LDFLAGS)
|
|
||||||
|
|
||||||
export GOLDFLAGS
|
export GOLDFLAGS
|
||||||
|
|
||||||
.PHONY: bin checkversion ci ci-lint default install-build-deps install-gen-deps fmt fmt-docs fmt-examples generate install-lint-deps lint \
|
default: deps generate test dev
|
||||||
releasebin test testacc testrace
|
|
||||||
|
|
||||||
default: install-build-deps install-gen-deps generate dev
|
ci: deps test
|
||||||
|
|
||||||
ci: testrace ## Test in continuous integration
|
release: deps test releasebin package ## Build a release build
|
||||||
|
|
||||||
release: install-build-deps test releasebin package ## Build a release build
|
bin: deps ## Build debug/test build
|
||||||
|
@go get github.com/mitchellh/gox
|
||||||
bin: install-build-deps ## Build debug/test build
|
|
||||||
@echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds."
|
@echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds."
|
||||||
@GO111MODULE=auto sh -c "$(CURDIR)/scripts/build.sh"
|
@sh -c "$(CURDIR)/scripts/build.sh"
|
||||||
|
|
||||||
releasebin: install-build-deps
|
releasebin: deps
|
||||||
|
@go get github.com/mitchellh/gox
|
||||||
@grep 'const VersionPrerelease = "dev"' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
|
@grep 'const VersionPrerelease = "dev"' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
|
||||||
echo "ERROR: You must remove prerelease tags from version/version.go prior to release."; \
|
echo "ERROR: You must remove prerelease tags from version/version.go prior to release."; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
@GO111MODULE=auto sh -c "$(CURDIR)/scripts/build.sh"
|
@sh -c "$(CURDIR)/scripts/build.sh"
|
||||||
|
|
||||||
package:
|
package:
|
||||||
$(if $(VERSION),,@echo 'VERSION= needed to release; Use make package skip compilation'; exit 1)
|
$(if $(VERSION),,@echo 'VERSION= needed to release; Use make package skip compilation'; exit 1)
|
||||||
@sh -c "$(CURDIR)/scripts/dist.sh $(VERSION)"
|
@sh -c "$(CURDIR)/scripts/dist.sh $(VERSION)"
|
||||||
|
|
||||||
install-build-deps: ## Install dependencies for bin build
|
deps:
|
||||||
@go install github.com/mitchellh/gox@v1.0.1
|
@go get golang.org/x/tools/cmd/goimports
|
||||||
|
@go get golang.org/x/tools/cmd/stringer
|
||||||
|
@go get -u github.com/mna/pigeon
|
||||||
|
@go get github.com/kardianos/govendor
|
||||||
|
@go get golang.org/x/tools/cmd/goimports
|
||||||
|
|
||||||
install-gen-deps: ## Install dependencies for code generation
|
dev: deps ## Build and install a development build
|
||||||
# to avoid having to tidy our go deps, we `go get` our binaries from a temp
|
|
||||||
# dir. `go get` will change our deps and the following deps are not part of
|
|
||||||
# out code dependencies; so a go mod tidy will remove them again. `go
|
|
||||||
# install` seems to install the last tagged version and we want to install
|
|
||||||
# master.
|
|
||||||
@(cd $(TEMPDIR) && GO111MODULE=on go get github.com/alvaroloes/enumer@master)
|
|
||||||
@go install github.com/hashicorp/packer-plugin-sdk/cmd/packer-sdc@latest
|
|
||||||
|
|
||||||
install-lint-deps: ## Install linter dependencies
|
|
||||||
# Pinning golangci-lint at v1.23.8 as --new-from-rev seems to work properly; the latest 1.24.0 has caused issues with memory consumption
|
|
||||||
@echo "==> Updating linter dependencies..."
|
|
||||||
@curl -sSfL -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.23.8
|
|
||||||
|
|
||||||
dev: ## Build and install a development build
|
|
||||||
@grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
|
@grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
|
||||||
echo "ERROR: You must add prerelease tags to version/version.go prior to making a dev build."; \
|
echo "ERROR: You must add prerelease tags to version/version.go prior to making a dev build."; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
@ -76,41 +60,22 @@ dev: ## Build and install a development build
|
|||||||
@cp $(GOPATH)/bin/packer bin/packer
|
@cp $(GOPATH)/bin/packer bin/packer
|
||||||
@cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH)
|
@cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH)
|
||||||
|
|
||||||
lint: install-lint-deps ## Lint Go code
|
|
||||||
@if [ ! -z $(PKG_NAME) ]; then \
|
|
||||||
echo "golangci-lint run ./$(PKG_NAME)/..."; \
|
|
||||||
golangci-lint run ./$(PKG_NAME)/...; \
|
|
||||||
else \
|
|
||||||
echo "golangci-lint run ./..."; \
|
|
||||||
golangci-lint run ./...; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
ci-lint: install-lint-deps ## On ci only lint newly added Go source files
|
|
||||||
@echo "==> Running linter on newly added Go source files..."
|
|
||||||
GO111MODULE=on golangci-lint run --new-from-rev=$(shell git merge-base origin/master HEAD) ./...
|
|
||||||
|
|
||||||
fmt: ## Format Go code
|
fmt: ## Format Go code
|
||||||
@go fmt ./...
|
@gofmt -w -s main.go $(UNFORMATTED_FILES)
|
||||||
|
|
||||||
fmt-check: fmt ## Check go code formatting
|
fmt-check: ## Check go code formatting
|
||||||
@echo "==> Checking that code complies with go fmt requirements..."
|
@echo "==> Checking that code complies with gofmt requirements..."
|
||||||
@git diff --exit-code; if [ $$? -eq 1 ]; then \
|
@if [ ! -z "$(UNFORMATTED_FILES)" ]; then \
|
||||||
echo "Found files that are not fmt'ed."; \
|
echo "gofmt needs to be run on the following files:"; \
|
||||||
|
echo "$(UNFORMATTED_FILES)" | xargs -n1; \
|
||||||
echo "You can use the command: \`make fmt\` to reformat code."; \
|
echo "You can use the command: \`make fmt\` to reformat code."; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
|
||||||
|
|
||||||
mode-check: ## Check that only certain files are executable
|
|
||||||
@echo "==> Checking that only certain files are executable..."
|
|
||||||
@if [ ! -z "$(EXECUTABLE_FILES)" ]; then \
|
|
||||||
echo "These files should not be executable or they must be white listed in the Makefile:"; \
|
|
||||||
echo "$(EXECUTABLE_FILES)" | xargs -n1; \
|
|
||||||
exit 1; \
|
|
||||||
else \
|
else \
|
||||||
echo "Check passed."; \
|
echo "Check passed."; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
fmt-docs:
|
fmt-docs:
|
||||||
@find ./website/pages/docs -name "*.md" -exec pandoc --wrap auto --columns 79 --atx-headers -s -f "markdown_github+yaml_metadata_block" -t "markdown_github+yaml_metadata_block" {} -o {} \;
|
@find ./website/source/docs -name "*.md" -exec pandoc --wrap auto --columns 79 --atx-headers -s -f "markdown_github+yaml_metadata_block" -t "markdown_github+yaml_metadata_block" {} -o {} \;
|
||||||
|
|
||||||
# Install js-beautify with npm install -g js-beautify
|
# Install js-beautify with npm install -g js-beautify
|
||||||
fmt-examples:
|
fmt-examples:
|
||||||
@ -118,47 +83,31 @@ fmt-examples:
|
|||||||
|
|
||||||
# generate runs `go generate` to build the dynamically generated
|
# generate runs `go generate` to build the dynamically generated
|
||||||
# source files.
|
# source files.
|
||||||
generate: install-gen-deps ## Generate dynamically generated code
|
generate: deps ## Generate dynamically generated code
|
||||||
@echo "==> removing autogenerated markdown..." # but don't remove partials generated in the SDK and copied over.
|
go generate .
|
||||||
@find website/pages -path website/pages/partials/packer-plugin-sdk -prune -o -type f | xargs grep -l '^<!-- Code generated' | xargs rm -f
|
gofmt -w common/bootcommand/boot_command.go
|
||||||
@echo "==> removing autogenerated code..."
|
goimports -w common/bootcommand/boot_command.go
|
||||||
@find post-processor helper builder provisioner -type f | xargs grep -l '^// Code generated' | xargs rm -f
|
gofmt -w command/plugin.go
|
||||||
PROJECT_ROOT="$(shell pwd)" go generate $(shell go list ./... | grep -v packer-plugin-sdk)
|
|
||||||
|
|
||||||
generate-check: generate ## Check go code generation is on par
|
test: deps fmt-check ## Run unit tests
|
||||||
@echo "==> Checking that auto-generated code is not changed..."
|
@go test $(TEST) $(TESTARGS) -timeout=2m
|
||||||
@git diff --exit-code; if [ $$? -eq 1 ]; then \
|
@go tool vet $(VET) ; if [ $$? -eq 1 ]; then \
|
||||||
echo "Found diffs in go generated code."; \
|
|
||||||
echo "You can use the command: \`make generate\` to reformat code."; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
test: mode-check vet ## Run unit tests
|
|
||||||
@go test -count $(COUNT) $(TEST) $(TESTARGS) -timeout=3m
|
|
||||||
|
|
||||||
# acctest runs provisioners acceptance tests
|
|
||||||
provisioners-acctest: #install-build-deps generate
|
|
||||||
ACC_TEST_BUILDERS=$(ACC_TEST_BUILDERS) go test $(TEST) $(TESTARGS) -timeout=1h
|
|
||||||
|
|
||||||
# testacc runs acceptance tests
|
|
||||||
testacc: # install-build-deps generate ## Run acceptance tests
|
|
||||||
@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
|
|
||||||
PACKER_ACC=1 go test -count $(COUNT) -v $(TEST) $(TESTARGS) -timeout=120m
|
|
||||||
|
|
||||||
testrace: mode-check vet ## Test with race detection enabled
|
|
||||||
@go test -count $(COUNT) -race $(TEST) $(TESTARGS) -timeout=3m -p=8
|
|
||||||
|
|
||||||
# Runs code coverage and open a html page with report
|
|
||||||
cover:
|
|
||||||
go test -count $(COUNT) $(TEST) $(TESTARGS) -timeout=3m -coverprofile=coverage.out
|
|
||||||
go tool cover -html=coverage.out
|
|
||||||
rm coverage.out
|
|
||||||
|
|
||||||
vet: ## Vet Go code
|
|
||||||
@go vet $(VET) ; if [ $$? -eq 1 ]; then \
|
|
||||||
echo "ERROR: Vet found problems in the code."; \
|
echo "ERROR: Vet found problems in the code."; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# testacc runs acceptance tests
|
||||||
|
testacc: deps generate ## Run acceptance tests
|
||||||
|
@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
|
||||||
|
PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m
|
||||||
|
|
||||||
|
testrace: deps ## Test for race conditions
|
||||||
|
@go test -race $(TEST) $(TESTARGS) -timeout=2m
|
||||||
|
|
||||||
|
updatedeps:
|
||||||
|
@echo "INFO: Packer deps are managed by govendor. See .github/CONTRIBUTING.md"
|
||||||
|
|
||||||
help:
|
help:
|
||||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||||
|
|
||||||
|
.PHONY: bin checkversion ci default deps fmt fmt-docs fmt-examples generate releasebin test testacc testrace updatedeps
|
||||||
|
94
README.md
94
README.md
@ -1,23 +1,22 @@
|
|||||||
# Packer
|
# Packer
|
||||||
|
|
||||||
[![Build Status][circleci-badge]][circleci]
|
[![Build Status][travis-badge]][travis]
|
||||||
[](https://discuss.hashicorp.com/c/packer)
|
[![Windows Build Status][appveyor-badge]][appveyor]
|
||||||
[](https://pkg.go.dev/github.com/hashicorp/packer)
|
[![GoDoc][godoc-badge]][godoc]
|
||||||
[![GoReportCard][report-badge]][report]
|
[![GoReportCard][report-badge]][report]
|
||||||
|
|
||||||
[circleci-badge]: https://circleci.com/gh/hashicorp/packer.svg?style=svg
|
[travis-badge]: https://travis-ci.org/hashicorp/packer.svg?branch=master
|
||||||
[circleci]: https://app.circleci.com/pipelines/github/hashicorp/packer
|
[travis]: https://travis-ci.org/hashicorp/packer
|
||||||
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
|
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
|
||||||
|
[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
|
||||||
[godoc-badge]: https://godoc.org/github.com/hashicorp/packer?status.svg
|
[godoc-badge]: https://godoc.org/github.com/hashicorp/packer?status.svg
|
||||||
[godoc]: https://godoc.org/github.com/hashicorp/packer
|
[godoc]: https://godoc.org/github.com/hashicorp/packer
|
||||||
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
|
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
|
||||||
[report]: https://goreportcard.com/report/github.com/hashicorp/packer
|
[report]: https://goreportcard.com/report/github.com/hashicorp/packer
|
||||||
|
|
||||||
<p align="center" style="text-align:center;">
|
* Website: https://www.packer.io
|
||||||
<a href="https://www.packer.io">
|
* IRC: `#packer-tool` on Freenode
|
||||||
<img alt="HashiCorp Packer logo" src="website/public/img/logo-packer-padded.svg" width="500" />
|
* Mailing list: [Google Groups](https://groups.google.com/forum/#!forum/packer-tool)
|
||||||
</a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
Packer is a tool for building identical machine images for multiple platforms
|
Packer is a tool for building identical machine images for multiple platforms
|
||||||
from a single source configuration.
|
from a single source configuration.
|
||||||
@ -25,7 +24,7 @@ from a single source configuration.
|
|||||||
Packer is lightweight, runs on every major operating system, and is highly
|
Packer is lightweight, runs on every major operating system, and is highly
|
||||||
performant, creating machine images for multiple platforms in parallel. Packer
|
performant, creating machine images for multiple platforms in parallel. Packer
|
||||||
comes out of the box with support for many platforms, the full list of which can
|
comes out of the box with support for many platforms, the full list of which can
|
||||||
be found at https://www.packer.io/docs/builders.
|
be found at https://www.packer.io/docs/builders/index.html.
|
||||||
|
|
||||||
Support for other platforms can be added via plugins.
|
Support for other platforms can be added via plugins.
|
||||||
|
|
||||||
@ -47,43 +46,33 @@ yourself](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.m
|
|||||||
|
|
||||||
After Packer is installed, create your first template, which tells Packer
|
After Packer is installed, create your first template, which tells Packer
|
||||||
what platforms to build images for and how you want to build them. In our
|
what platforms to build images for and how you want to build them. In our
|
||||||
case, we'll create a simple AMI that has Redis pre-installed.
|
case, we'll create a simple AMI that has Redis pre-installed. Save this
|
||||||
|
file as `quick-start.json`. Export your AWS credentials as the
|
||||||
Save this file as `quick-start.pkr.hcl`. Export your AWS credentials as the
|
|
||||||
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
|
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
|
||||||
|
|
||||||
```hcl
|
```json
|
||||||
variable "access_key" {
|
{
|
||||||
type = string
|
"variables": {
|
||||||
default = "${env("AWS_ACCESS_KEY_ID")}"
|
"access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
|
||||||
}
|
"secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
|
||||||
|
},
|
||||||
variable "secret_key" {
|
"builders": [{
|
||||||
type = string
|
"type": "amazon-ebs",
|
||||||
default = "${env("AWS_SECRET_ACCESS_KEY")}"
|
"access_key": "{{user `access_key`}}",
|
||||||
}
|
"secret_key": "{{user `secret_key`}}",
|
||||||
|
"region": "us-east-1",
|
||||||
locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") }
|
"source_ami": "ami-af22d9b9",
|
||||||
|
"instance_type": "t2.micro",
|
||||||
source "amazon-ebs" "quick-start" {
|
"ssh_username": "ubuntu",
|
||||||
access_key = "${var.access_key}"
|
"ami_name": "packer-example {{timestamp}}"
|
||||||
ami_name = "packer-example ${local.timestamp}"
|
}]
|
||||||
instance_type = "t2.micro"
|
|
||||||
region = "us-east-1"
|
|
||||||
secret_key = "${var.secret_key}"
|
|
||||||
source_ami = "ami-af22d9b9"
|
|
||||||
ssh_username = "ubuntu"
|
|
||||||
}
|
|
||||||
|
|
||||||
build {
|
|
||||||
sources = ["source.amazon-ebs.quick-start"]
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Next, tell Packer to build the image:
|
Next, tell Packer to build the image:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ packer build quick-start.pkr.hcl
|
$ packer build quick-start.json
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -91,32 +80,17 @@ Packer will build an AMI according to the "quick-start" template. The AMI
|
|||||||
will be available in your AWS account. To delete the AMI, you must manually
|
will be available in your AWS account. To delete the AMI, you must manually
|
||||||
delete it using the [AWS console](https://console.aws.amazon.com/). Packer
|
delete it using the [AWS console](https://console.aws.amazon.com/). Packer
|
||||||
builds your images, it does not manage their lifecycle. Where they go, how
|
builds your images, it does not manage their lifecycle. Where they go, how
|
||||||
they're run, etc., is up to you.
|
they're run, etc. is up to you.
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
Comprehensive documentation is viewable on the Packer website at https://www.packer.io/docs.
|
Comprehensive documentation is viewable on the Packer website:
|
||||||
|
|
||||||
## Contributing to Packer
|
https://www.packer.io/docs
|
||||||
|
|
||||||
|
## Developing Packer
|
||||||
|
|
||||||
See
|
See
|
||||||
[CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
|
[CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
|
||||||
for best practices and instructions on setting up your development environment
|
for best practices and instructions on setting up your development environment
|
||||||
to work on Packer.
|
to work on Packer.
|
||||||
|
|
||||||
## Unmaintained Plugins
|
|
||||||
As contributors' circumstances change, development on a community maintained
|
|
||||||
plugin can slow. When this happens, the Packer team may mark a plugin as
|
|
||||||
unmaintained, to clearly signal the plugin's status to users.
|
|
||||||
|
|
||||||
What does **unmaintained** mean?
|
|
||||||
|
|
||||||
1. The code repository and all commit history will still be available.
|
|
||||||
1. Documentation will remain on the Packer website.
|
|
||||||
1. Issues and pull requests are monitored as a best effort.
|
|
||||||
1. No active development will be performed by the Packer team.
|
|
||||||
|
|
||||||
If anyone form them community is interested in maintaining a community
|
|
||||||
supported plugin, please feel free to submit contributions via a pull-
|
|
||||||
request for review; reviews are generally prioritized over feature work
|
|
||||||
when possible. For a list of open plugin issues and pending feature requests see the [Packer Issue Tracker](https://github.com/hashicorp/packer/issues/).
|
|
||||||
|
12
Vagrantfile
vendored
12
Vagrantfile
vendored
@ -5,9 +5,9 @@ LINUX_BASE_BOX = "bento/ubuntu-16.04"
|
|||||||
FREEBSD_BASE_BOX = "jen20/FreeBSD-12.0-CURRENT"
|
FREEBSD_BASE_BOX = "jen20/FreeBSD-12.0-CURRENT"
|
||||||
|
|
||||||
Vagrant.configure(2) do |config|
|
Vagrant.configure(2) do |config|
|
||||||
if Vagrant.has_plugin?("vagrant-cachier")
|
if Vagrant.has_plugin?("vagrant-cachier")
|
||||||
config.cache.scope = :box
|
config.cache.scope = :box
|
||||||
end
|
end
|
||||||
|
|
||||||
# Compilation and development boxes
|
# Compilation and development boxes
|
||||||
config.vm.define "linux", autostart: true, primary: true do |vmCfg|
|
config.vm.define "linux", autostart: true, primary: true do |vmCfg|
|
||||||
@ -73,12 +73,6 @@ def configureProviders(vmCfg, cpus: "2", memory: "2048")
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
vmCfg.vm.provider "docker" do |d, override|
|
|
||||||
d.build_dir = "."
|
|
||||||
d.has_ssh = true
|
|
||||||
override.vm.box = nil
|
|
||||||
end
|
|
||||||
|
|
||||||
return vmCfg
|
return vmCfg
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@ -1,71 +0,0 @@
|
|||||||
// component_acc_test.go should contain acceptance tests for plugin components
|
|
||||||
// to make sure all component types can be discovered and started.
|
|
||||||
package plugin
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "embed"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
amazonacc "github.com/hashicorp/packer-plugin-amazon/builder/ebs/acceptance"
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/acctest"
|
|
||||||
"github.com/hashicorp/packer/hcl2template/addrs"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:embed test-fixtures/basic-amazon-ami-datasource.pkr.hcl
|
|
||||||
var basicAmazonAmiDatasourceHCL2Template string
|
|
||||||
|
|
||||||
func TestAccInitAndBuildBasicAmazonAmiDatasource(t *testing.T) {
|
|
||||||
plugin := addrs.Plugin{
|
|
||||||
Hostname: "github.com",
|
|
||||||
Namespace: "hashicorp",
|
|
||||||
Type: "amazon",
|
|
||||||
}
|
|
||||||
testCase := &acctest.PluginTestCase{
|
|
||||||
Name: "amazon-ami_basic_datasource_test",
|
|
||||||
Setup: func() error {
|
|
||||||
return cleanupPluginInstallation(plugin)
|
|
||||||
},
|
|
||||||
Teardown: func() error {
|
|
||||||
helper := amazonacc.AWSHelper{
|
|
||||||
Region: "us-west-2",
|
|
||||||
AMIName: "packer-amazon-ami-test",
|
|
||||||
}
|
|
||||||
return helper.CleanUpAmi()
|
|
||||||
},
|
|
||||||
Template: basicAmazonAmiDatasourceHCL2Template,
|
|
||||||
Type: "amazon-ami",
|
|
||||||
Init: true,
|
|
||||||
CheckInit: func(initCommand *exec.Cmd, logfile string) error {
|
|
||||||
if initCommand.ProcessState != nil {
|
|
||||||
if initCommand.ProcessState.ExitCode() != 0 {
|
|
||||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logs, err := os.Open(logfile)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable find %s", logfile)
|
|
||||||
}
|
|
||||||
defer logs.Close()
|
|
||||||
|
|
||||||
logsBytes, err := ioutil.ReadAll(logs)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable to read %s", logfile)
|
|
||||||
}
|
|
||||||
initOutput := string(logsBytes)
|
|
||||||
return checkPluginInstallation(initOutput, plugin)
|
|
||||||
},
|
|
||||||
Check: func(buildCommand *exec.Cmd, logfile string) error {
|
|
||||||
if buildCommand.ProcessState != nil {
|
|
||||||
if buildCommand.ProcessState.ExitCode() != 0 {
|
|
||||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
acctest.TestPlugin(t, testCase)
|
|
||||||
}
|
|
@ -1,112 +0,0 @@
|
|||||||
// plugin_acc_test.go should contain acceptance tests for features related to
|
|
||||||
// installing, discovering and running plugins.
|
|
||||||
package plugin
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "embed"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
amazonacc "github.com/hashicorp/packer-plugin-amazon/builder/ebs/acceptance"
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/acctest"
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
|
|
||||||
"github.com/hashicorp/packer/hcl2template/addrs"
|
|
||||||
"github.com/mitchellh/go-homedir"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:embed test-fixtures/basic-amazon-ebs.pkr.hcl
|
|
||||||
var basicAmazonEbsHCL2Template string
|
|
||||||
|
|
||||||
func TestAccInitAndBuildBasicAmazonEbs(t *testing.T) {
|
|
||||||
plugin := addrs.Plugin{
|
|
||||||
Hostname: "github.com",
|
|
||||||
Namespace: "hashicorp",
|
|
||||||
Type: "amazon",
|
|
||||||
}
|
|
||||||
testCase := &acctest.PluginTestCase{
|
|
||||||
Name: "amazon-ebs_basic_plugin_init_and_build_test",
|
|
||||||
Setup: func() error {
|
|
||||||
return cleanupPluginInstallation(plugin)
|
|
||||||
},
|
|
||||||
Teardown: func() error {
|
|
||||||
helper := amazonacc.AWSHelper{
|
|
||||||
Region: "us-east-1",
|
|
||||||
AMIName: "packer-plugin-amazon-ebs-test",
|
|
||||||
}
|
|
||||||
return helper.CleanUpAmi()
|
|
||||||
},
|
|
||||||
Template: basicAmazonEbsHCL2Template,
|
|
||||||
Type: "amazon-ebs",
|
|
||||||
Init: true,
|
|
||||||
CheckInit: func(initCommand *exec.Cmd, logfile string) error {
|
|
||||||
if initCommand.ProcessState != nil {
|
|
||||||
if initCommand.ProcessState.ExitCode() != 0 {
|
|
||||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logs, err := os.Open(logfile)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable find %s", logfile)
|
|
||||||
}
|
|
||||||
defer logs.Close()
|
|
||||||
|
|
||||||
logsBytes, err := ioutil.ReadAll(logs)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable to read %s", logfile)
|
|
||||||
}
|
|
||||||
initOutput := string(logsBytes)
|
|
||||||
return checkPluginInstallation(initOutput, plugin)
|
|
||||||
},
|
|
||||||
Check: func(buildCommand *exec.Cmd, logfile string) error {
|
|
||||||
if buildCommand.ProcessState != nil {
|
|
||||||
if buildCommand.ProcessState.ExitCode() != 0 {
|
|
||||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
acctest.TestPlugin(t, testCase)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cleanupPluginInstallation(plugin addrs.Plugin) error {
|
|
||||||
home, err := homedir.Dir()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pluginPath := filepath.Join(home,
|
|
||||||
".packer.d",
|
|
||||||
"plugins",
|
|
||||||
plugin.Hostname,
|
|
||||||
plugin.Namespace,
|
|
||||||
plugin.Type)
|
|
||||||
testutils.CleanupFiles(pluginPath)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkPluginInstallation(initOutput string, plugin addrs.Plugin) error {
|
|
||||||
expectedInitLog := "Installed plugin " + plugin.String()
|
|
||||||
if matched, _ := regexp.MatchString(expectedInitLog+".*", initOutput); !matched {
|
|
||||||
return fmt.Errorf("logs doesn't contain expected foo value %q", initOutput)
|
|
||||||
}
|
|
||||||
|
|
||||||
home, err := homedir.Dir()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pluginPath := filepath.Join(home,
|
|
||||||
".packer.d",
|
|
||||||
"plugins",
|
|
||||||
plugin.Hostname,
|
|
||||||
plugin.Namespace,
|
|
||||||
plugin.Type)
|
|
||||||
if !testutils.FileExists(pluginPath) {
|
|
||||||
return fmt.Errorf("%s plugin installation not found", plugin.String())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,33 +0,0 @@
|
|||||||
packer {
|
|
||||||
required_plugins {
|
|
||||||
amazon = {
|
|
||||||
version = ">= 0.0.1"
|
|
||||||
source = "github.com/hashicorp/amazon"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data "amazon-ami" "test" {
|
|
||||||
filters = {
|
|
||||||
name = "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*"
|
|
||||||
root-device-type = "ebs"
|
|
||||||
virtualization-type = "hvm"
|
|
||||||
}
|
|
||||||
most_recent = true
|
|
||||||
owners = ["099720109477"]
|
|
||||||
}
|
|
||||||
|
|
||||||
source "amazon-ebs" "basic-example" {
|
|
||||||
region = "us-west-2"
|
|
||||||
source_ami = data.amazon-ami.test.id
|
|
||||||
ami_name = "packer-amazon-ami-test"
|
|
||||||
communicator = "ssh"
|
|
||||||
instance_type = "t2.micro"
|
|
||||||
ssh_username = "ubuntu"
|
|
||||||
}
|
|
||||||
|
|
||||||
build {
|
|
||||||
sources = [
|
|
||||||
"source.amazon-ebs.basic-example"
|
|
||||||
]
|
|
||||||
}
|
|
@ -1,20 +0,0 @@
|
|||||||
packer {
|
|
||||||
required_plugins {
|
|
||||||
amazon = {
|
|
||||||
version = ">= 0.0.1"
|
|
||||||
source = "github.com/hashicorp/amazon"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
source "amazon-ebs" "basic-test" {
|
|
||||||
region = "us-east-1"
|
|
||||||
instance_type = "m3.medium"
|
|
||||||
source_ami = "ami-76b2a71e"
|
|
||||||
ssh_username = "ubuntu"
|
|
||||||
ami_name = "packer-plugin-amazon-ebs-test"
|
|
||||||
}
|
|
||||||
|
|
||||||
build {
|
|
||||||
sources = ["source.amazon-ebs.basic-test"]
|
|
||||||
}
|
|
@ -1,224 +0,0 @@
|
|||||||
package acctest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
|
||||||
"github.com/hashicorp/packer-plugin-sdk/template"
|
|
||||||
"github.com/hashicorp/packer/packer"
|
|
||||||
"github.com/hashicorp/packer/provisioner/file"
|
|
||||||
shellprovisioner "github.com/hashicorp/packer/provisioner/shell"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestEnvVar must be set to a non-empty value for acceptance tests to run.
|
|
||||||
const TestEnvVar = "PACKER_ACC"
|
|
||||||
|
|
||||||
// TestCase is a single set of tests to run for a backend. A TestCase
|
|
||||||
// should generally map 1:1 to each test method for your acceptance
|
|
||||||
// tests.
|
|
||||||
type TestCase struct {
|
|
||||||
// Precheck, if non-nil, will be called once before the test case
|
|
||||||
// runs at all. This can be used for some validation prior to the
|
|
||||||
// test running.
|
|
||||||
PreCheck func()
|
|
||||||
|
|
||||||
// Builder is the Builder that will be tested. It will be available
|
|
||||||
// as the "test" builder in the template.
|
|
||||||
Builder packersdk.Builder
|
|
||||||
|
|
||||||
// Template is the template contents to use.
|
|
||||||
Template string
|
|
||||||
|
|
||||||
// Check is called after this step is executed in order to test that
|
|
||||||
// the step executed successfully. If this is not set, then the next
|
|
||||||
// step will be called
|
|
||||||
Check TestCheckFunc
|
|
||||||
|
|
||||||
// Teardown will be called before the test case is over regardless
|
|
||||||
// of if the test succeeded or failed. This should return an error
|
|
||||||
// in the case that the test can't guarantee all resources were
|
|
||||||
// properly cleaned up.
|
|
||||||
Teardown TestTeardownFunc
|
|
||||||
|
|
||||||
// If SkipArtifactTeardown is true, we will not attempt to destroy the
|
|
||||||
// artifact created in this test run.
|
|
||||||
SkipArtifactTeardown bool
|
|
||||||
// If set, overrides the default provisioner store with custom provisioners.
|
|
||||||
// This can be useful for running acceptance tests for a particular
|
|
||||||
// provisioner using a specific builder.
|
|
||||||
// Default provisioner store:
|
|
||||||
// ProvisionerStore: packersdk.MapOfProvisioner{
|
|
||||||
// "shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
|
|
||||||
// "file": func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
|
|
||||||
// },
|
|
||||||
ProvisionerStore packersdk.MapOfProvisioner
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCheckFunc is the callback used for Check in TestStep.
|
|
||||||
type TestCheckFunc func([]packersdk.Artifact) error
|
|
||||||
|
|
||||||
// TestTeardownFunc is the callback used for Teardown in TestCase.
|
|
||||||
type TestTeardownFunc func() error
|
|
||||||
|
|
||||||
// TestT is the interface used to handle the test lifecycle of a test.
|
|
||||||
//
|
|
||||||
// Users should just use a *testing.T object, which implements this.
|
|
||||||
type TestT interface {
|
|
||||||
Error(args ...interface{})
|
|
||||||
Fatal(args ...interface{})
|
|
||||||
Skip(args ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
type TestBuilderSet struct {
|
|
||||||
packer.BuilderSet
|
|
||||||
StartFn func(name string) (packersdk.Builder, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tbs TestBuilderSet) Start(name string) (packersdk.Builder, error) { return tbs.StartFn(name) }
|
|
||||||
|
|
||||||
// Test performs an acceptance test on a backend with the given test case.
|
|
||||||
//
|
|
||||||
// Tests are not run unless an environmental variable "PACKER_ACC" is
|
|
||||||
// set to some non-empty value. This is to avoid test cases surprising
|
|
||||||
// a user by creating real resources.
|
|
||||||
//
|
|
||||||
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
|
|
||||||
// the "-test.v" flag) is set. Because some acceptance tests take quite
|
|
||||||
// long, we require the verbose flag so users are able to see progress
|
|
||||||
// output.
|
|
||||||
func Test(t TestT, c TestCase) {
|
|
||||||
// We only run acceptance tests if an env var is set because they're
|
|
||||||
// slow and generally require some outside configuration.
|
|
||||||
if os.Getenv(TestEnvVar) == "" {
|
|
||||||
t.Skip(fmt.Sprintf(
|
|
||||||
"Acceptance tests skipped unless env '%s' set",
|
|
||||||
TestEnvVar))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// We require verbose mode so that the user knows what is going on.
|
|
||||||
if !testTesting && !testing.Verbose() {
|
|
||||||
t.Fatal("Acceptance tests must be run with the -v flag on tests")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the PreCheck if we have it
|
|
||||||
if c.PreCheck != nil {
|
|
||||||
c.PreCheck()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the template
|
|
||||||
log.Printf("[DEBUG] Parsing template...")
|
|
||||||
tpl, err := template.Parse(strings.NewReader(c.Template))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.ProvisionerStore == nil {
|
|
||||||
c.ProvisionerStore = packersdk.MapOfProvisioner{
|
|
||||||
"shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
|
|
||||||
"file": func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Build the core
|
|
||||||
log.Printf("[DEBUG] Initializing core...")
|
|
||||||
core := packer.NewCore(&packer.CoreConfig{
|
|
||||||
Components: packer.ComponentFinder{
|
|
||||||
PluginConfig: &packer.PluginConfig{
|
|
||||||
Builders: TestBuilderSet{
|
|
||||||
StartFn: func(n string) (packersdk.Builder, error) {
|
|
||||||
if n == "test" {
|
|
||||||
return c.Builder, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Provisioners: c.ProvisionerStore,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Template: tpl,
|
|
||||||
})
|
|
||||||
err = core.Initialize()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the build
|
|
||||||
log.Printf("[DEBUG] Retrieving 'test' build")
|
|
||||||
build, err := core.Build("test")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare it
|
|
||||||
log.Printf("[DEBUG] Preparing 'test' build")
|
|
||||||
warnings, err := build.Prepare()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Prepare error: %s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(warnings) > 0 {
|
|
||||||
t.Fatal(fmt.Sprintf(
|
|
||||||
"Prepare warnings:\n\n%s",
|
|
||||||
strings.Join(warnings, "\n")))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run it! We use a temporary directory for caching and discard
|
|
||||||
// any UI output. We discard since it shows up in logs anyways.
|
|
||||||
log.Printf("[DEBUG] Running 'test' build")
|
|
||||||
ui := &packersdk.BasicUi{
|
|
||||||
Reader: os.Stdin,
|
|
||||||
Writer: ioutil.Discard,
|
|
||||||
ErrorWriter: ioutil.Discard,
|
|
||||||
PB: &packersdk.NoopProgressTracker{},
|
|
||||||
}
|
|
||||||
artifacts, err := build.Run(context.Background(), ui)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Run error:\n\n%s", err))
|
|
||||||
goto TEARDOWN
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check function
|
|
||||||
if c.Check != nil {
|
|
||||||
log.Printf("[DEBUG] Running check function")
|
|
||||||
if err := c.Check(artifacts); err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Check error:\n\n%s", err))
|
|
||||||
goto TEARDOWN
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
TEARDOWN:
|
|
||||||
if !c.SkipArtifactTeardown {
|
|
||||||
// Delete all artifacts
|
|
||||||
for _, a := range artifacts {
|
|
||||||
if err := a.Destroy(); err != nil {
|
|
||||||
t.Error(fmt.Sprintf(
|
|
||||||
"!!! ERROR REMOVING ARTIFACT '%s': %s !!!",
|
|
||||||
a.String(), err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Teardown
|
|
||||||
if c.Teardown != nil {
|
|
||||||
log.Printf("[DEBUG] Running teardown function")
|
|
||||||
if err := c.Teardown(); err != nil {
|
|
||||||
t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is for unit tests of this package.
|
|
||||||
var testTesting = false
|
|
85
appveyor.yml
Normal file
85
appveyor.yml
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
# appveyor.yml reference : http://www.appveyor.com/docs/appveyor-yml
|
||||||
|
|
||||||
|
version: "{build}"
|
||||||
|
|
||||||
|
skip_tags: true
|
||||||
|
|
||||||
|
branches:
|
||||||
|
only:
|
||||||
|
- master
|
||||||
|
|
||||||
|
os: Windows Server 2012 R2
|
||||||
|
|
||||||
|
environment:
|
||||||
|
GOPATH: c:\gopath
|
||||||
|
|
||||||
|
clone_folder: c:\gopath\src\github.com\hashicorp\packer
|
||||||
|
|
||||||
|
install:
|
||||||
|
- ps: |
|
||||||
|
# Installs golang on Windows.
|
||||||
|
#
|
||||||
|
# # Run script:
|
||||||
|
# .\install-go.ps1
|
||||||
|
#
|
||||||
|
# # Download and run script:
|
||||||
|
# $env:GOVERSION = '1.5.3'
|
||||||
|
# iex ((new-object net.webclient).DownloadString('SCRIPT_URL_HERE'))
|
||||||
|
|
||||||
|
$version = '1.11'
|
||||||
|
|
||||||
|
$downloadDir = $env:TEMP
|
||||||
|
$packageName = 'golang'
|
||||||
|
$url32 = 'https://storage.googleapis.com/golang/go' + $version + '.windows-386.zip'
|
||||||
|
$url64 = 'https://storage.googleapis.com/golang/go' + $version + '.windows-amd64.zip'
|
||||||
|
$goroot = "C:\go$version"
|
||||||
|
|
||||||
|
# Determine type of system
|
||||||
|
if ($ENV:PROCESSOR_ARCHITECTURE -eq "AMD64") {
|
||||||
|
$url = $url64
|
||||||
|
} else {
|
||||||
|
$url = $url32
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Test-Path "$goroot\bin\go") {
|
||||||
|
Write-Host "Go is installed to $goroot"
|
||||||
|
exit
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "Downloading $url"
|
||||||
|
$zip = "$downloadDir\golang-$version.zip"
|
||||||
|
if (!(Test-Path "$zip")) {
|
||||||
|
$downloader = new-object System.Net.WebClient
|
||||||
|
$downloader.DownloadFile($url, $zip)
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "Extracting $zip to $goroot"
|
||||||
|
if (Test-Path "$downloadDir\go") {
|
||||||
|
rm -Force -Recurse -Path "$downloadDir\go"
|
||||||
|
}
|
||||||
|
Add-Type -AssemblyName System.IO.Compression.FileSystem
|
||||||
|
[System.IO.Compression.ZipFile]::ExtractToDirectory("$zip", $downloadDir)
|
||||||
|
mv "$downloadDir\go" $goroot
|
||||||
|
- set GO15VENDOREXPERIMENT=1
|
||||||
|
- set GOROOT=C:\go1.11
|
||||||
|
- set Path=%GOROOT%\bin;%PATH%
|
||||||
|
- echo %Path%
|
||||||
|
- echo %GOROOT%
|
||||||
|
- go version
|
||||||
|
- go env
|
||||||
|
- go get github.com/mitchellh/gox
|
||||||
|
- go get golang.org/x/tools/cmd/stringer
|
||||||
|
|
||||||
|
build_script:
|
||||||
|
- git rev-parse HEAD
|
||||||
|
# go test $(go list ./... | grep -v vendor)
|
||||||
|
- ps: |
|
||||||
|
go.exe test (go.exe list ./... `
|
||||||
|
|? { -not $_.Contains('/vendor/') } `
|
||||||
|
|? { $_ -ne 'github.com/hashicorp/packer/builder/parallels/common' } `
|
||||||
|
|? { $_ -ne 'github.com/hashicorp/packer/common' }`
|
||||||
|
|? { $_ -ne 'github.com/hashicorp/packer/provisioner/ansible' })
|
||||||
|
|
||||||
|
test: off
|
||||||
|
|
||||||
|
deploy: off
|
@ -1,22 +0,0 @@
|
|||||||
// +build !openbsd
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/process"
|
|
||||||
)
|
|
||||||
|
|
||||||
func checkProcess(currentPID int) (bool, error) {
|
|
||||||
myProc, err := process.NewProcess(int32(currentPID))
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("Process check error: %s", err)
|
|
||||||
}
|
|
||||||
bg, err := myProc.Background()
|
|
||||||
if err != nil {
|
|
||||||
return bg, fmt.Errorf("Process background check error: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return bg, nil
|
|
||||||
}
|
|
@ -1,10 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
func checkProcess(currentPID int) (bool, error) {
|
|
||||||
return false, fmt.Errorf("cannot determine if process is backgrounded in " +
|
|
||||||
"openbsd")
|
|
||||||
}
|
|
89
builder/alicloud/ecs/access_config.go
Normal file
89
builder/alicloud/ecs/access_config.go
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config of alicloud
|
||||||
|
type AlicloudAccessConfig struct {
|
||||||
|
AlicloudAccessKey string `mapstructure:"access_key"`
|
||||||
|
AlicloudSecretKey string `mapstructure:"secret_key"`
|
||||||
|
AlicloudRegion string `mapstructure:"region"`
|
||||||
|
AlicloudSkipValidation bool `mapstructure:"skip_region_validation"`
|
||||||
|
SecurityToken string `mapstructure:"security_token"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client for AlicloudClient
|
||||||
|
func (c *AlicloudAccessConfig) Client() (*ecs.Client, error) {
|
||||||
|
if err := c.loadAndValidate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if c.SecurityToken == "" {
|
||||||
|
c.SecurityToken = os.Getenv("SECURITY_TOKEN")
|
||||||
|
}
|
||||||
|
client := ecs.NewECSClientWithSecurityToken(c.AlicloudAccessKey, c.AlicloudSecretKey,
|
||||||
|
c.SecurityToken, common.Region(c.AlicloudRegion))
|
||||||
|
|
||||||
|
client.SetBusinessInfo("Packer")
|
||||||
|
if _, err := client.DescribeRegions(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AlicloudAccessConfig) Prepare(ctx *interpolate.Context) []error {
|
||||||
|
var errs []error
|
||||||
|
if err := c.Config(); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.AlicloudRegion != "" && !c.AlicloudSkipValidation {
|
||||||
|
if c.validateRegion() != nil {
|
||||||
|
errs = append(errs, fmt.Errorf("Unknown alicloud region: %s", c.AlicloudRegion))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errs) > 0 {
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AlicloudAccessConfig) Config() error {
|
||||||
|
if c.AlicloudAccessKey == "" {
|
||||||
|
c.AlicloudAccessKey = os.Getenv("ALICLOUD_ACCESS_KEY")
|
||||||
|
}
|
||||||
|
if c.AlicloudSecretKey == "" {
|
||||||
|
c.AlicloudSecretKey = os.Getenv("ALICLOUD_SECRET_KEY")
|
||||||
|
}
|
||||||
|
if c.AlicloudAccessKey == "" || c.AlicloudSecretKey == "" {
|
||||||
|
return fmt.Errorf("ALICLOUD_ACCESS_KEY and ALICLOUD_SECRET_KEY must be set in template file or environment variables.")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AlicloudAccessConfig) loadAndValidate() error {
|
||||||
|
if err := c.validateRegion(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AlicloudAccessConfig) validateRegion() error {
|
||||||
|
|
||||||
|
for _, valid := range common.ValidRegions {
|
||||||
|
if c.AlicloudRegion == string(valid) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Not a valid alicloud region: %s", c.AlicloudRegion)
|
||||||
|
}
|
44
builder/alicloud/ecs/access_config_test.go
Normal file
44
builder/alicloud/ecs/access_config_test.go
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testAlicloudAccessConfig() *AlicloudAccessConfig {
|
||||||
|
return &AlicloudAccessConfig{
|
||||||
|
AlicloudAccessKey: "ak",
|
||||||
|
AlicloudSecretKey: "acs",
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlicloudAccessConfigPrepareRegion(t *testing.T) {
|
||||||
|
c := testAlicloudAccessConfig()
|
||||||
|
c.AlicloudRegion = ""
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudRegion = "cn-beijing-3"
|
||||||
|
if err := c.Prepare(nil); err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudRegion = "cn-beijing"
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudRegion = "unknown"
|
||||||
|
if err := c.Prepare(nil); err == nil {
|
||||||
|
t.Fatalf("should have err")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudRegion = "unknown"
|
||||||
|
c.AlicloudSkipValidation = true
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
c.AlicloudSkipValidation = false
|
||||||
|
|
||||||
|
}
|
135
builder/alicloud/ecs/artifact.go
Normal file
135
builder/alicloud/ecs/artifact.go
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Artifact struct {
|
||||||
|
// A map of regions to alicloud image IDs.
|
||||||
|
AlicloudImages map[string]string
|
||||||
|
|
||||||
|
// BuilderId is the unique ID for the builder that created this alicloud image
|
||||||
|
BuilderIdValue string
|
||||||
|
|
||||||
|
// Alcloud connection for performing API stuff.
|
||||||
|
Client *ecs.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) BuilderId() string {
|
||||||
|
return a.BuilderIdValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Artifact) Files() []string {
|
||||||
|
// We have no files
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) Id() string {
|
||||||
|
parts := make([]string, 0, len(a.AlicloudImages))
|
||||||
|
for region, ecsImageId := range a.AlicloudImages {
|
||||||
|
parts = append(parts, fmt.Sprintf("%s:%s", region, ecsImageId))
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(parts)
|
||||||
|
return strings.Join(parts, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) String() string {
|
||||||
|
alicloudImageStrings := make([]string, 0, len(a.AlicloudImages))
|
||||||
|
for region, id := range a.AlicloudImages {
|
||||||
|
single := fmt.Sprintf("%s: %s", region, id)
|
||||||
|
alicloudImageStrings = append(alicloudImageStrings, single)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(alicloudImageStrings)
|
||||||
|
return fmt.Sprintf("Alicloud images were created:\n\n%s", strings.Join(alicloudImageStrings, "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) State(name string) interface{} {
|
||||||
|
switch name {
|
||||||
|
case "atlas.artifact.metadata":
|
||||||
|
return a.stateAtlasMetadata()
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) Destroy() error {
|
||||||
|
errors := make([]error, 0)
|
||||||
|
|
||||||
|
for region, imageId := range a.AlicloudImages {
|
||||||
|
log.Printf("Delete alicloud image ID (%s) from region (%s)", imageId, region)
|
||||||
|
|
||||||
|
// Get alicloud image metadata
|
||||||
|
images, _, err := a.Client.DescribeImages(&ecs.DescribeImagesArgs{
|
||||||
|
RegionId: common.Region(region),
|
||||||
|
ImageId: imageId})
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
if len(images) == 0 {
|
||||||
|
err := fmt.Errorf("Error retrieving details for alicloud image(%s), no alicloud images found", imageId)
|
||||||
|
errors = append(errors, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
//Unshared the shared account before destroy
|
||||||
|
sharePermissions, err := a.Client.DescribeImageSharePermission(&ecs.ModifyImageSharePermissionArgs{RegionId: common.Region(region), ImageId: imageId})
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
accountsNumber := len(sharePermissions.Accounts.Account)
|
||||||
|
if accountsNumber > 0 {
|
||||||
|
accounts := make([]string, accountsNumber)
|
||||||
|
for index, account := range sharePermissions.Accounts.Account {
|
||||||
|
accounts[index] = account.AliyunId
|
||||||
|
}
|
||||||
|
err := a.Client.ModifyImageSharePermission(&ecs.ModifyImageSharePermissionArgs{
|
||||||
|
|
||||||
|
RegionId: common.Region(region),
|
||||||
|
ImageId: imageId,
|
||||||
|
RemoveAccount: accounts,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete alicloud images
|
||||||
|
if err := a.Client.DeleteImage(common.Region(region), imageId); err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
//Delete the snapshot of this images
|
||||||
|
for _, diskDevices := range images[0].DiskDeviceMappings.DiskDeviceMapping {
|
||||||
|
if err := a.Client.DeleteSnapshot(diskDevices.SnapshotId); err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errors) > 0 {
|
||||||
|
if len(errors) == 1 {
|
||||||
|
return errors[0]
|
||||||
|
} else {
|
||||||
|
return &packer.MultiError{Errors: errors}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Artifact) stateAtlasMetadata() interface{} {
|
||||||
|
metadata := make(map[string]string)
|
||||||
|
for region, imageId := range a.AlicloudImages {
|
||||||
|
k := fmt.Sprintf("region.%s", region)
|
||||||
|
metadata[k] = imageId
|
||||||
|
}
|
||||||
|
|
||||||
|
return metadata
|
||||||
|
}
|
47
builder/alicloud/ecs/artifact_test.go
Normal file
47
builder/alicloud/ecs/artifact_test.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestArtifact_Impl verifies at compile time that *Artifact satisfies
// the packer.Artifact interface.
func TestArtifact_Impl(t *testing.T) {
	var _ packer.Artifact = new(Artifact)
}
|
||||||
|
|
||||||
|
// TestArtifactId checks that Id() joins the region:image pairs in
// sorted order, comma separated.
func TestArtifactId(t *testing.T) {
	expected := `east:foo,west:bar`

	ecsImages := make(map[string]string)
	ecsImages["east"] = "foo"
	ecsImages["west"] = "bar"

	a := &Artifact{
		AlicloudImages: ecsImages,
	}

	result := a.Id()
	if result != expected {
		t.Fatalf("bad: %s", result)
	}
}
|
||||||
|
|
||||||
|
// TestArtifactState_atlasMetadata checks that State returns the
// expected "region.<name>" -> imageId map for the atlas metadata key.
func TestArtifactState_atlasMetadata(t *testing.T) {
	a := &Artifact{
		AlicloudImages: map[string]string{
			"east": "foo",
			"west": "bar",
		},
	}

	actual := a.State("atlas.artifact.metadata")
	expected := map[string]string{
		"region.east": "foo",
		"region.west": "bar",
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
|
239
builder/alicloud/ecs/builder.go
Normal file
239
builder/alicloud/ecs/builder.go
Normal file
@ -0,0 +1,239 @@
|
|||||||
|
// The alicloud contains a packer.Builder implementation that
|
||||||
|
// builds ecs images for alicloud.
|
||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/common"
|
||||||
|
"github.com/hashicorp/packer/helper/communicator"
|
||||||
|
"github.com/hashicorp/packer/helper/config"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The unique ID for this builder
|
||||||
|
const BuilderId = "alibaba.alicloud"
|
||||||
|
|
||||||
|
// Config is the full builder configuration: the common packer options
// plus the alicloud access, image, and run sections, all flattened
// ("squash") into one mapstructure namespace.
type Config struct {
	common.PackerConfig  `mapstructure:",squash"`
	AlicloudAccessConfig `mapstructure:",squash"`
	AlicloudImageConfig  `mapstructure:",squash"`
	RunConfig            `mapstructure:",squash"`

	// ctx is the interpolation context used when rendering templated
	// configuration values.
	ctx interpolate.Context
}
|
||||||
|
|
||||||
|
// Builder implements packer.Builder and builds ECS images on alicloud.
type Builder struct {
	config Config           // decoded configuration (populated by Prepare)
	runner multistep.Runner // step runner, retained so Cancel can stop it
}
|
||||||
|
|
||||||
|
// InstanceNetWork names the network type an instance is launched into.
type InstanceNetWork string

const (
	// ClassicNet is the classic (non-VPC) network.
	ClassicNet = InstanceNetWork("classic")
	// VpcNet is the VPC network (required for user data and key pairs;
	// see isVpcNetRequired).
	VpcNet = InstanceNetWork("vpc")

	// Default operation timeouts, in seconds.
	ALICLOUD_DEFAULT_SHORT_TIMEOUT = 180
	ALICLOUD_DEFAULT_TIMEOUT       = 1800
	ALICLOUD_DEFAULT_LONG_TIMEOUT  = 3600
)
|
||||||
|
|
||||||
|
// Prepare decodes and validates the builder configuration. It returns
// no warnings; on validation failure it returns the accumulated errors.
// As a side effect it registers the access/secret keys with packer's
// log secret filter so they are scrubbed from output.
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
	err := config.Decode(&b.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &b.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				// run_command is rendered later, at run time.
				"run_command",
			},
		},
	}, raws...)
	b.config.ctx.EnableEnv = true
	if err != nil {
		return nil, err
	}

	// Accumulate any errors
	var errs *packer.MultiError
	errs = packer.MultiErrorAppend(errs, b.config.AlicloudAccessConfig.Prepare(&b.config.ctx)...)
	errs = packer.MultiErrorAppend(errs, b.config.AlicloudImageConfig.Prepare(&b.config.ctx)...)
	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)

	if errs != nil && len(errs.Errors) > 0 {
		return nil, errs
	}

	// Keep credentials out of packer's log output.
	packer.LogSecretFilter.Set(b.config.AlicloudAccessKey, b.config.AlicloudSecretKey)
	log.Println(b.config)
	return nil, nil
}
|
||||||
|
|
||||||
|
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
|
||||||
|
|
||||||
|
client, err := b.config.Client()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
state := new(multistep.BasicStateBag)
|
||||||
|
state.Put("config", b.config)
|
||||||
|
state.Put("client", client)
|
||||||
|
state.Put("hook", hook)
|
||||||
|
state.Put("ui", ui)
|
||||||
|
state.Put("networktype", b.chooseNetworkType())
|
||||||
|
var steps []multistep.Step
|
||||||
|
|
||||||
|
// Build the steps
|
||||||
|
steps = []multistep.Step{
|
||||||
|
&stepPreValidate{
|
||||||
|
AlicloudDestImageName: b.config.AlicloudImageName,
|
||||||
|
ForceDelete: b.config.AlicloudImageForceDelete,
|
||||||
|
},
|
||||||
|
&stepCheckAlicloudSourceImage{
|
||||||
|
SourceECSImageId: b.config.AlicloudSourceImage,
|
||||||
|
},
|
||||||
|
&stepConfigAlicloudKeyPair{
|
||||||
|
Debug: b.config.PackerDebug,
|
||||||
|
Comm: &b.config.Comm,
|
||||||
|
DebugKeyPath: fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if b.chooseNetworkType() == VpcNet {
|
||||||
|
steps = append(steps,
|
||||||
|
&stepConfigAlicloudVPC{
|
||||||
|
VpcId: b.config.VpcId,
|
||||||
|
CidrBlock: b.config.CidrBlock,
|
||||||
|
VpcName: b.config.VpcName,
|
||||||
|
},
|
||||||
|
&stepConfigAlicloudVSwitch{
|
||||||
|
VSwitchId: b.config.VSwitchId,
|
||||||
|
ZoneId: b.config.ZoneId,
|
||||||
|
CidrBlock: b.config.CidrBlock,
|
||||||
|
VSwitchName: b.config.VSwitchName,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
steps = append(steps,
|
||||||
|
&stepConfigAlicloudSecurityGroup{
|
||||||
|
SecurityGroupId: b.config.SecurityGroupId,
|
||||||
|
SecurityGroupName: b.config.SecurityGroupId,
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
},
|
||||||
|
&stepCreateAlicloudInstance{
|
||||||
|
IOOptimized: b.config.IOOptimized,
|
||||||
|
InstanceType: b.config.InstanceType,
|
||||||
|
UserData: b.config.UserData,
|
||||||
|
UserDataFile: b.config.UserDataFile,
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
InternetChargeType: b.config.InternetChargeType,
|
||||||
|
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
|
||||||
|
InstanceName: b.config.InstanceName,
|
||||||
|
ZoneId: b.config.ZoneId,
|
||||||
|
})
|
||||||
|
if b.chooseNetworkType() == VpcNet {
|
||||||
|
steps = append(steps, &stepConfigAlicloudEIP{
|
||||||
|
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
InternetChargeType: b.config.InternetChargeType,
|
||||||
|
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
steps = append(steps, &stepConfigAlicloudPublicIP{
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
steps = append(steps,
|
||||||
|
&stepAttachKeyPair{},
|
||||||
|
&stepRunAlicloudInstance{},
|
||||||
|
&stepMountAlicloudDisk{},
|
||||||
|
&communicator.StepConnect{
|
||||||
|
Config: &b.config.RunConfig.Comm,
|
||||||
|
Host: SSHHost(
|
||||||
|
client,
|
||||||
|
b.config.SSHPrivateIp),
|
||||||
|
SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
|
||||||
|
},
|
||||||
|
&common.StepProvision{},
|
||||||
|
&stepStopAlicloudInstance{
|
||||||
|
ForceStop: b.config.ForceStopInstance,
|
||||||
|
},
|
||||||
|
&stepDeleteAlicloudImageSnapshots{
|
||||||
|
AlicloudImageForceDeleteSnapshots: b.config.AlicloudImageForceDeleteSnapshots,
|
||||||
|
AlicloudImageForceDelete: b.config.AlicloudImageForceDelete,
|
||||||
|
AlicloudImageName: b.config.AlicloudImageName,
|
||||||
|
},
|
||||||
|
&stepCreateAlicloudImage{},
|
||||||
|
&stepRegionCopyAlicloudImage{
|
||||||
|
AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions,
|
||||||
|
AlicloudImageDestinationNames: b.config.AlicloudImageDestinationNames,
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
},
|
||||||
|
&stepShareAlicloudImage{
|
||||||
|
AlicloudImageShareAccounts: b.config.AlicloudImageShareAccounts,
|
||||||
|
AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
|
||||||
|
RegionId: b.config.AlicloudRegion,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Run!
|
||||||
|
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
|
||||||
|
b.runner.Run(state)
|
||||||
|
|
||||||
|
// If there was an error, return that
|
||||||
|
if rawErr, ok := state.GetOk("error"); ok {
|
||||||
|
return nil, rawErr.(error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there are no ECS images, then just return
|
||||||
|
if _, ok := state.GetOk("alicloudimages"); !ok {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the artifact and return it
|
||||||
|
artifact := &Artifact{
|
||||||
|
AlicloudImages: state.Get("alicloudimages").(map[string]string),
|
||||||
|
BuilderIdValue: BuilderId,
|
||||||
|
Client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
return artifact, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel stops a running build by cancelling the step runner, if one
// has been started.
func (b *Builder) Cancel() {
	if b.runner != nil {
		log.Println("Cancelling the step runner...")
		b.runner.Cancel()
	}
}
|
||||||
|
|
||||||
|
func (b *Builder) chooseNetworkType() InstanceNetWork {
|
||||||
|
if b.isVpcNetRequired() {
|
||||||
|
return VpcNet
|
||||||
|
} else {
|
||||||
|
return ClassicNet
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isVpcNetRequired reports whether the build must run in a VPC: either
// a VPC is configured explicitly, or a VPC-only feature is in use.
func (b *Builder) isVpcNetRequired() bool {
	// UserData and KeyPair only works in VPC
	return b.isVpcSpecified() || b.isUserDataNeeded() || b.isKeyPairNeeded()
}
|
||||||
|
|
||||||
|
// isVpcSpecified reports whether the user configured an explicit VPC
// or vswitch.
func (b *Builder) isVpcSpecified() bool {
	return b.config.VpcId != "" || b.config.VSwitchId != ""
}
|
||||||
|
|
||||||
|
func (b *Builder) isUserDataNeeded() bool {
|
||||||
|
// Public key setup requires userdata
|
||||||
|
if b.config.RunConfig.Comm.SSHPrivateKeyFile != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.config.UserData != "" || b.config.UserDataFile != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// isKeyPairNeeded reports whether an SSH key pair (named or temporary)
// is configured.
func (b *Builder) isKeyPairNeeded() bool {
	return b.config.Comm.SSHKeyPairName != "" || b.config.Comm.SSHTemporaryKeyPairName != ""
}
|
331
builder/alicloud/ecs/builder_acc_test.go
Normal file
331
builder/alicloud/ecs/builder_acc_test.go
Normal file
@ -0,0 +1,331 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
builderT "github.com/hashicorp/packer/helper/builder/testing"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestBuilderAcc_basic runs a minimal end-to-end acceptance build.
// Requires live alicloud credentials (see testAccPreCheck).
func TestBuilderAcc_basic(t *testing.T) {
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:  &Builder{},
		Template: testBuilderAccBasic,
	})
}
|
||||||
|
|
||||||
|
//func TestBuilderAcc_windows(t *testing.T) {
|
||||||
|
// builderT.Test(t, builderT.TestCase{
|
||||||
|
// PreCheck: func() {
|
||||||
|
// testAccPreCheck(t)
|
||||||
|
// },
|
||||||
|
// Builder: &Builder{},
|
||||||
|
// Template: testBuilderAccWindows,
|
||||||
|
// })
|
||||||
|
//}
|
||||||
|
|
||||||
|
//func TestBuilderAcc_regionCopy(t *testing.T) {
|
||||||
|
// builderT.Test(t, builderT.TestCase{
|
||||||
|
// PreCheck: func() {
|
||||||
|
// testAccPreCheck(t)
|
||||||
|
// },
|
||||||
|
// Builder: &Builder{},
|
||||||
|
// Template: testBuilderAccRegionCopy,
|
||||||
|
// Check: checkRegionCopy([]string{"cn-hangzhou", "cn-shenzhen"}),
|
||||||
|
// })
|
||||||
|
//}
|
||||||
|
|
||||||
|
// TestBuilderAcc_forceDelete verifies image_force_delete: the first
// build leaves its artifact in place, the second (with force delete
// enabled) must replace the same-named image.
func TestBuilderAcc_forceDelete(t *testing.T) {
	// Build the same alicloud image twice, with ecs_image_force_delete on the second run
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:              &Builder{},
		Template:             buildForceDeregisterConfig("false", "delete"),
		SkipArtifactTeardown: true,
	})

	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:  &Builder{},
		Template: buildForceDeregisterConfig("true", "delete"),
	})
}
|
||||||
|
|
||||||
|
// TestBuilderAcc_ECSImageSharing builds an image shared with a fixed
// account id and verifies the share took effect.
func TestBuilderAcc_ECSImageSharing(t *testing.T) {
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:  &Builder{},
		Template: testBuilderAccSharing,
		Check:    checkECSImageSharing("1309208528360047"),
	})
}
|
||||||
|
|
||||||
|
// TestBuilderAcc_forceDeleteSnapshot verifies
// image_force_delete_snapshots: the first build's snapshots are looked
// up, then the second build (with the flag on) must delete them.
func TestBuilderAcc_forceDeleteSnapshot(t *testing.T) {
	destImageName := "delete"

	// Build the same alicloud image name twice, with force_delete_snapshot on the second run
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:              &Builder{},
		Template:             buildForceDeleteSnapshotConfig("false", destImageName),
		SkipArtifactTeardown: true,
	})

	// Get image data by image image name
	client, _ := testAliyunClient()
	images, _, _ := client.DescribeImages(&ecs.DescribeImagesArgs{
		ImageName: "packer-test-" + destImageName,
		RegionId:  common.Region("cn-beijing")})

	image := images[0]

	// Get snapshot ids for image
	snapshotIds := []string{}
	for _, device := range image.DiskDeviceMappings.DiskDeviceMapping {
		if device.Device != "" && device.SnapshotId != "" {
			snapshotIds = append(snapshotIds, device.SnapshotId)
		}
	}

	builderT.Test(t, builderT.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Builder:  &Builder{},
		Template: buildForceDeleteSnapshotConfig("true", destImageName),
		Check:    checkSnapshotsDeleted(snapshotIds),
	})
}
|
||||||
|
|
||||||
|
// checkSnapshotsDeleted returns a check that queries the given
// snapshot ids and fails if any of them still exist.
func checkSnapshotsDeleted(snapshotIds []string) builderT.TestCheckFunc {
	return func(artifacts []packer.Artifact) error {
		// Verify the snapshots are gone
		client, _ := testAliyunClient()
		snapshotResp, _, err := client.DescribeSnapshots(
			&ecs.DescribeSnapshotsArgs{RegionId: common.Region("cn-beijing"), SnapshotIds: snapshotIds},
		)
		if err != nil {
			return fmt.Errorf("Query snapshot failed %v", err)
		}
		if len(snapshotResp) > 0 {
			return fmt.Errorf("Snapshots weren't successfully deleted by " +
				"`ecs_image_force_delete_snapshots`")
		}
		return nil
	}
}
|
||||||
|
|
||||||
|
func checkECSImageSharing(uid string) builderT.TestCheckFunc {
|
||||||
|
return func(artifacts []packer.Artifact) error {
|
||||||
|
if len(artifacts) > 1 {
|
||||||
|
return fmt.Errorf("more than 1 artifact")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||||
|
artifactRaw := artifacts[0]
|
||||||
|
artifact, ok := artifactRaw.(*Artifact)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||||
|
}
|
||||||
|
|
||||||
|
// describe the image, get block devices with a snapshot
|
||||||
|
client, _ := testAliyunClient()
|
||||||
|
imageSharePermissionResponse, err := client.DescribeImageSharePermission(
|
||||||
|
&ecs.ModifyImageSharePermissionArgs{
|
||||||
|
RegionId: "cn-beijing",
|
||||||
|
ImageId: artifact.AlicloudImages["cn-beijing"],
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving Image Attributes for ECS Image Artifact (%#v) "+
|
||||||
|
"in ECS Image Sharing Test: %s", artifact, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(imageSharePermissionResponse.Accounts.Account) != 1 &&
|
||||||
|
imageSharePermissionResponse.Accounts.Account[0].AliyunId != uid {
|
||||||
|
return fmt.Errorf("share account is incorrect %d",
|
||||||
|
len(imageSharePermissionResponse.Accounts.Account))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkRegionCopy returns a check that verifies the artifact covers
// exactly the given copy regions (plus the cn-beijing build region)
// and waits for each copied image to become ready.
func checkRegionCopy(regions []string) builderT.TestCheckFunc {
	return func(artifacts []packer.Artifact) error {
		if len(artifacts) > 1 {
			return fmt.Errorf("more than 1 artifact")
		}

		// Get the actual *Artifact pointer so we can access the AMIs directly
		artifactRaw := artifacts[0]
		artifact, ok := artifactRaw.(*Artifact)
		if !ok {
			return fmt.Errorf("unknown artifact: %#v", artifactRaw)
		}

		// Verify that we copied to only the regions given
		regionSet := make(map[string]struct{})
		for _, r := range regions {
			regionSet[r] = struct{}{}
		}
		for r := range artifact.AlicloudImages {
			// The source region always appears; it is not a copy.
			if r == "cn-beijing" {
				delete(regionSet, r)
				continue
			}
			if _, ok := regionSet[r]; !ok {
				return fmt.Errorf("unknown region: %s", r)
			}

			delete(regionSet, r)
		}
		// Anything left in the set was requested but never copied to.
		if len(regionSet) > 0 {
			return fmt.Errorf("didn't copy to: %#v", regionSet)
		}
		client, _ := testAliyunClient()
		for key, value := range artifact.AlicloudImages {
			client.WaitForImageReady(common.Region(key), value, 1800)
		}
		return nil
	}
}
|
||||||
|
|
||||||
|
// testAccPreCheck fails the test early unless the alicloud credentials
// needed by the acceptance tests are present in the environment.
func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" {
		t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests")
	}

	if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" {
		t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests")
	}
}
|
||||||
|
|
||||||
|
func testAliyunClient() (*ecs.Client, error) {
|
||||||
|
access := &AlicloudAccessConfig{AlicloudRegion: "cn-beijing"}
|
||||||
|
err := access.Config()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client, err := access.Client()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minimal template for the basic acceptance build.
// NOTE(review): "ssh_username" appears twice ("ubuntu" then "root");
// the later key wins in JSON decoding — confirm and drop the first.
const testBuilderAccBasic = `
{ "builders": [{
	"type": "test",
	"region": "cn-beijing",
	"instance_type": "ecs.n1.tiny",
	"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
	"ssh_username": "ubuntu",
	"io_optimized":"true",
	"ssh_username":"root",
	"image_name": "packer-test_{{timestamp}}"
	}]
}`

// Template that copies the built image to two extra regions.
const testBuilderAccRegionCopy = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_name": "packer-test_{{timestamp}}",
		"image_copy_regions": ["cn-hangzhou", "cn-shenzhen"]
	}]
}
`

// Template with printf slots for image_force_delete and the image name
// suffix (see buildForceDeregisterConfig).
const testBuilderAccForceDelete = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_force_delete": "%s",
		"image_name": "packer-test_%s"
	}]
}
`

// Template with printf slots for both force-delete flags and the image
// name suffix (see buildForceDeleteSnapshotConfig).
const testBuilderAccForceDeleteSnapshot = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_force_delete_snapshots": "%s",
		"image_force_delete": "%s",
		"image_name": "packer-test-%s"
	}]
}
`
|
||||||
|
|
||||||
|
// share with catsby
// Template that shares the built image with a fixed account id;
// checked by checkECSImageSharing.
const testBuilderAccSharing = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_name": "packer-test_{{timestamp}}",
		"image_share_account":["1309208528360047"]
	}]
}
`
|
||||||
|
|
||||||
|
// buildForceDeregisterConfig renders the force-delete template with
// the given image_force_delete value and image-name suffix.
func buildForceDeregisterConfig(val, name string) string {
	return fmt.Sprintf(testBuilderAccForceDelete, val, name)
}
|
||||||
|
|
||||||
|
// buildForceDeleteSnapshotConfig renders the force-delete-snapshot
// template; val fills both force-delete flags, name the image suffix.
func buildForceDeleteSnapshotConfig(val, name string) string {
	return fmt.Sprintf(testBuilderAccForceDeleteSnapshot, val, val, name)
}
|
||||||
|
|
||||||
|
// Template for a Windows source image built over WinRM (used by the
// currently commented-out TestBuilderAcc_windows).
const testBuilderAccWindows = `
{ "builders": [{
	"type": "test",
	"region": "cn-beijing",
	"instance_type": "ecs.n1.tiny",
	"source_image":"win2008_64_ent_r2_zh-cn_40G_alibase_20170301.vhd",
	"io_optimized":"true",
	"image_force_delete":"true",
	"communicator": "winrm",
	"winrm_port": 5985,
	"winrm_username": "Administrator",
	"winrm_password": "Test1234",
	"image_name": "packer-test_{{timestamp}}"
	}]
}`
|
95
builder/alicloud/ecs/builder_test.go
Normal file
95
builder/alicloud/ecs/builder_test.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testBuilderConfig returns a minimal valid raw configuration for
// Builder.Prepare; unit tests mutate a copy of it per case.
func testBuilderConfig() map[string]interface{} {
	return map[string]interface{}{
		"access_key":    "foo",
		"secret_key":    "bar",
		"source_image":  "foo",
		"instance_type": "ecs.n1.tiny",
		"region":        "cn-beijing",
		"ssh_username":  "root",
		"image_name":    "foo",
		"io_optimized":  true,
	}
}
|
||||||
|
|
||||||
|
// TestBuilder_ImplementsBuilder verifies that *Builder satisfies the
// packer.Builder interface.
func TestBuilder_ImplementsBuilder(t *testing.T) {
	var raw interface{}
	raw = &Builder{}
	if _, ok := raw.(packer.Builder); !ok {
		t.Fatalf("Builder should be a builder")
	}
}
|
||||||
|
|
||||||
|
// TestBuilder_Prepare_BadType checks that Prepare rejects a config
// value of the wrong type (a slice where a string is expected).
func TestBuilder_Prepare_BadType(t *testing.T) {
	b := &Builder{}
	c := map[string]interface{}{
		"access_key": []string{},
	}

	warnings, err := b.Prepare(c)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatalf("prepare should fail")
	}
}
|
||||||
|
|
||||||
|
// TestBuilderPrepare_ECSImageName exercises image-name validation in
// Prepare: a valid name, a malformed template, and a missing name.
func TestBuilderPrepare_ECSImageName(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Test good
	config["image_name"] = "ecs.n1.tiny"
	warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	// Test bad: unterminated template syntax must be rejected.
	config["ecs_image_name"] = "foo {{"
	b = Builder{}
	warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}

	// Test bad: image_name is required.
	delete(config, "image_name")
	b = Builder{}
	warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}
|
||||||
|
|
||||||
|
// TestBuilderPrepare_InvalidKey checks that Prepare rejects unknown
// configuration keys.
func TestBuilderPrepare_InvalidKey(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Add a random key
	config["i_should_not_be_valid"] = true
	warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}
|
100
builder/alicloud/ecs/image_config.go
Normal file
100
builder/alicloud/ecs/image_config.go
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AlicloudDiskDevice describes one disk mapping carried into the image.
type AlicloudDiskDevice struct {
	DiskName           string `mapstructure:"disk_name"`
	DiskCategory       string `mapstructure:"disk_category"`
	DiskSize           int    `mapstructure:"disk_size"`
	SnapshotId         string `mapstructure:"disk_snapshot_id"`
	Description        string `mapstructure:"disk_description"`
	DeleteWithInstance bool   `mapstructure:"disk_delete_with_instance"`
	Device             string `mapstructure:"disk_device"`
}
|
||||||
|
|
||||||
|
// AlicloudDiskDevices holds the configured disk mappings for the image.
type AlicloudDiskDevices struct {
	ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings"`
}
|
||||||
|
|
||||||
|
// AlicloudImageConfig is the image-related portion of the builder
// configuration: naming, copy/share destinations, and force-delete
// behavior for pre-existing images and their snapshots.
type AlicloudImageConfig struct {
	AlicloudImageName                 string   `mapstructure:"image_name"`
	AlicloudImageVersion              string   `mapstructure:"image_version"`
	AlicloudImageDescription          string   `mapstructure:"image_description"`
	AlicloudImageShareAccounts        []string `mapstructure:"image_share_account"`
	AlicloudImageUNShareAccounts      []string `mapstructure:"image_unshare_account"`
	AlicloudImageDestinationRegions   []string `mapstructure:"image_copy_regions"`
	AlicloudImageDestinationNames     []string `mapstructure:"image_copy_names"`
	AlicloudImageForceDelete          bool     `mapstructure:"image_force_delete"`
	AlicloudImageForceDeleteSnapshots bool     `mapstructure:"image_force_delete_snapshots"`
	AlicloudImageForceDeleteInstances bool     `mapstructure:"image_force_delete_instances"`
	AlicloudImageSkipRegionValidation bool     `mapstructure:"skip_region_validation"`
	// Disk mappings are flattened into the same namespace.
	AlicloudDiskDevices `mapstructure:",squash"`
}
|
||||||
|
|
||||||
|
func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
|
||||||
|
var errs []error
|
||||||
|
if c.AlicloudImageName == "" {
|
||||||
|
errs = append(errs, fmt.Errorf("image_name must be specified"))
|
||||||
|
} else if len(c.AlicloudImageName) < 2 || len(c.AlicloudImageName) > 128 {
|
||||||
|
errs = append(errs, fmt.Errorf("image_name must less than 128 letters and more than 1 letters"))
|
||||||
|
} else if strings.HasPrefix(c.AlicloudImageName, "http://") ||
|
||||||
|
strings.HasPrefix(c.AlicloudImageName, "https://") {
|
||||||
|
errs = append(errs, fmt.Errorf("image_name can't start with 'http://' or 'https://'"))
|
||||||
|
}
|
||||||
|
reg := regexp.MustCompile("\\s+")
|
||||||
|
if reg.FindString(c.AlicloudImageName) != "" {
|
||||||
|
errs = append(errs, fmt.Errorf("image_name can't include spaces"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.AlicloudImageDestinationRegions) > 0 {
|
||||||
|
regionSet := make(map[string]struct{})
|
||||||
|
regions := make([]string, 0, len(c.AlicloudImageDestinationRegions))
|
||||||
|
|
||||||
|
for _, region := range c.AlicloudImageDestinationRegions {
|
||||||
|
// If we already saw the region, then don't look again
|
||||||
|
if _, ok := regionSet[region]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark that we saw the region
|
||||||
|
regionSet[region] = struct{}{}
|
||||||
|
|
||||||
|
if !c.AlicloudImageSkipRegionValidation {
|
||||||
|
// Verify the region is real
|
||||||
|
if valid := validateRegion(region); valid != nil {
|
||||||
|
errs = append(errs, fmt.Errorf("Unknown region: %s", region))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
regions = append(regions, region)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageDestinationRegions = regions
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errs) > 0 {
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateRegion(region string) error {
|
||||||
|
|
||||||
|
for _, valid := range common.ValidRegions {
|
||||||
|
if region == string(valid) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Not a valid alicloud region: %s", region)
|
||||||
|
}
|
64
builder/alicloud/ecs/image_config_test.go
Normal file
64
builder/alicloud/ecs/image_config_test.go
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testAlicloudImageConfig returns a minimal valid image config for
// the Prepare unit tests to mutate.
func testAlicloudImageConfig() *AlicloudImageConfig {
	return &AlicloudImageConfig{
		AlicloudImageName: "foo",
	}
}
|
||||||
|
|
||||||
|
func TestECSImageConfigPrepare_name(t *testing.T) {
|
||||||
|
c := testAlicloudImageConfig()
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageName = ""
|
||||||
|
if err := c.Prepare(nil); err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAMIConfigPrepare_regions(t *testing.T) {
|
||||||
|
c := testAlicloudImageConfig()
|
||||||
|
c.AlicloudImageDestinationRegions = nil
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageDestinationRegions = regionsToString()
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("shouldn't have err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageDestinationRegions = []string{"foo"}
|
||||||
|
if err := c.Prepare(nil); err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageDestinationRegions = []string{"cn-beijing", "cn-hangzhou", "eu-central-1"}
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatalf("bad: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AlicloudImageDestinationRegions = []string{"unknow"}
|
||||||
|
c.AlicloudImageSkipRegionValidation = true
|
||||||
|
if err := c.Prepare(nil); err != nil {
|
||||||
|
t.Fatal("shouldn't have error")
|
||||||
|
}
|
||||||
|
c.AlicloudImageSkipRegionValidation = false
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func regionsToString() []string {
|
||||||
|
var regions []string
|
||||||
|
for _, region := range common.ValidRegions {
|
||||||
|
regions = append(regions, string(region))
|
||||||
|
}
|
||||||
|
return regions
|
||||||
|
}
|
22
builder/alicloud/ecs/packer_helper.go
Normal file
22
builder/alicloud/ecs/packer_helper.go
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
func message(state multistep.StateBag, module string) {
|
||||||
|
_, cancelled := state.GetOk(multistep.StateCancelled)
|
||||||
|
_, halted := state.GetOk(multistep.StateHalted)
|
||||||
|
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
if cancelled || halted {
|
||||||
|
ui.Say(fmt.Sprintf("Deleting %s because of cancellation or error...", module))
|
||||||
|
} else {
|
||||||
|
ui.Say(fmt.Sprintf("Cleaning up '%s'", module))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
70
builder/alicloud/ecs/run_config.go
Normal file
70
builder/alicloud/ecs/run_config.go
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/common/uuid"
|
||||||
|
"github.com/hashicorp/packer/helper/communicator"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RunConfig struct {
|
||||||
|
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
|
||||||
|
ZoneId string `mapstructure:"zone_id"`
|
||||||
|
IOOptimized bool `mapstructure:"io_optimized"`
|
||||||
|
InstanceType string `mapstructure:"instance_type"`
|
||||||
|
Description string `mapstructure:"description"`
|
||||||
|
AlicloudSourceImage string `mapstructure:"source_image"`
|
||||||
|
ForceStopInstance bool `mapstructure:"force_stop_instance"`
|
||||||
|
SecurityGroupId string `mapstructure:"security_group_id"`
|
||||||
|
SecurityGroupName string `mapstructure:"security_group_name"`
|
||||||
|
UserData string `mapstructure:"user_data"`
|
||||||
|
UserDataFile string `mapstructure:"user_data_file"`
|
||||||
|
VpcId string `mapstructure:"vpc_id"`
|
||||||
|
VpcName string `mapstructure:"vpc_name"`
|
||||||
|
CidrBlock string `mapstructure:"vpc_cidr_block"`
|
||||||
|
VSwitchId string `mapstructure:"vswitch_id"`
|
||||||
|
VSwitchName string `mapstructure:"vswitch_id"`
|
||||||
|
InstanceName string `mapstructure:"instance_name"`
|
||||||
|
InternetChargeType string `mapstructure:"internet_charge_type"`
|
||||||
|
InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out"`
|
||||||
|
|
||||||
|
// Communicator settings
|
||||||
|
Comm communicator.Config `mapstructure:",squash"`
|
||||||
|
SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
|
||||||
|
if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
|
||||||
|
c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" && c.Comm.WinRMPassword == "" {
|
||||||
|
|
||||||
|
c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validation
|
||||||
|
errs := c.Comm.Prepare(ctx)
|
||||||
|
if c.AlicloudSourceImage == "" {
|
||||||
|
errs = append(errs, errors.New("A source_image must be specified"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.TrimSpace(c.AlicloudSourceImage) != c.AlicloudSourceImage {
|
||||||
|
errs = append(errs, errors.New("The source_image can't include spaces"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.InstanceType == "" {
|
||||||
|
errs = append(errs, errors.New("An alicloud_instance_type must be specified"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.UserData != "" && c.UserDataFile != "" {
|
||||||
|
errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
|
||||||
|
} else if c.UserDataFile != "" {
|
||||||
|
if _, err := os.Stat(c.UserDataFile); err != nil {
|
||||||
|
errs = append(errs, fmt.Errorf("user_data_file not found: %s", c.UserDataFile))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return errs
|
||||||
|
}
|
125
builder/alicloud/ecs/run_config_test.go
Normal file
125
builder/alicloud/ecs/run_config_test.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/communicator"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testConfig() *RunConfig {
|
||||||
|
return &RunConfig{
|
||||||
|
AlicloudSourceImage: "alicloud_images",
|
||||||
|
InstanceType: "ecs.n1.tiny",
|
||||||
|
Comm: communicator.Config{
|
||||||
|
SSHUsername: "alicloud",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
err := c.Prepare(nil)
|
||||||
|
if len(err) > 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_InstanceType(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
c.InstanceType = ""
|
||||||
|
if err := c.Prepare(nil); len(err) != 1 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_SourceECSImage(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
c.AlicloudSourceImage = ""
|
||||||
|
if err := c.Prepare(nil); len(err) != 1 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_SSHPort(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
c.Comm.SSHPort = 0
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Comm.SSHPort != 22 {
|
||||||
|
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Comm.SSHPort = 44
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Comm.SSHPort != 44 {
|
||||||
|
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_UserData(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
tf, err := ioutil.TempFile("", "packer")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(tf.Name())
|
||||||
|
defer tf.Close()
|
||||||
|
|
||||||
|
c.UserData = "foo"
|
||||||
|
c.UserDataFile = tf.Name()
|
||||||
|
if err := c.Prepare(nil); len(err) != 1 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_UserDataFile(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.UserDataFile = "idontexistidontthink"
|
||||||
|
if err := c.Prepare(nil); len(err) != 1 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tf, err := ioutil.TempFile("", "packer")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(tf.Name())
|
||||||
|
defer tf.Close()
|
||||||
|
|
||||||
|
c.UserDataFile = tf.Name()
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
|
||||||
|
c := testConfig()
|
||||||
|
c.Comm.SSHTemporaryKeyPairName = ""
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Comm.SSHTemporaryKeyPairName == "" {
|
||||||
|
t.Fatal("keypair name is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Comm.SSHTemporaryKeyPairName = "ssh-key-123"
|
||||||
|
if err := c.Prepare(nil); len(err) != 0 {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Comm.SSHTemporaryKeyPairName != "ssh-key-123" {
|
||||||
|
t.Fatal("keypair name does not match")
|
||||||
|
}
|
||||||
|
}
|
23
builder/alicloud/ecs/ssh_helper.go
Normal file
23
builder/alicloud/ecs/ssh_helper.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// modified in tests
|
||||||
|
sshHostSleepDuration = time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type alicloudSSHHelper interface {
|
||||||
|
}
|
||||||
|
|
||||||
|
// SSHHost returns a function that can be given to the SSH communicator
|
||||||
|
func SSHHost(e alicloudSSHHelper, private bool) func(multistep.StateBag) (string, error) {
|
||||||
|
return func(state multistep.StateBag) (string, error) {
|
||||||
|
ipAddress := state.Get("ipaddress").(string)
|
||||||
|
return ipAddress, nil
|
||||||
|
}
|
||||||
|
}
|
75
builder/alicloud/ecs/step_attach_keypair.go
Normal file
75
builder/alicloud/ecs/step_attach_keypair.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepAttachKeyPair struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepAttachKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
config := state.Get("config").(Config)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
timeoutPoint := time.Now().Add(120 * time.Second)
|
||||||
|
keyPairName := config.Comm.SSHKeyPairName
|
||||||
|
if keyPairName == "" {
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
err := client.AttachKeyPair(&ecs.AttachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
|
||||||
|
KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
|
||||||
|
if err != nil {
|
||||||
|
e, _ := err.(*common.Error)
|
||||||
|
if (!(e.Code == "MissingParameter" || e.Code == "DependencyViolation.WindowsInstance" ||
|
||||||
|
e.Code == "InvalidKeyPairName.NotFound" || e.Code == "InvalidRegionId.NotFound")) &&
|
||||||
|
time.Now().Before(timeoutPoint) {
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err := fmt.Errorf("Error attaching keypair %s to instance %s : %s",
|
||||||
|
keyPairName, instance.InstanceId, err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Message(fmt.Sprintf("Attach keypair %s to instance: %s", keyPairName, instance.InstanceId))
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepAttachKeyPair) Cleanup(state multistep.StateBag) {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
config := state.Get("config").(Config)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
keyPairName := config.Comm.SSHKeyPairName
|
||||||
|
if keyPairName == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err := client.DetachKeyPair(&ecs.DetachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
|
||||||
|
KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error Detaching keypair %s to instance %s : %s", keyPairName,
|
||||||
|
instance.InstanceId, err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Message(fmt.Sprintf("Detach keypair %s from instance: %s", keyPairName, instance.InstanceId))
|
||||||
|
|
||||||
|
}
|
60
builder/alicloud/ecs/step_check_source_image.go
Normal file
60
builder/alicloud/ecs/step_check_source_image.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepCheckAlicloudSourceImage struct {
|
||||||
|
SourceECSImageId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepCheckAlicloudSourceImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
config := state.Get("config").(Config)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
args := &ecs.DescribeImagesArgs{
|
||||||
|
RegionId: common.Region(config.AlicloudRegion),
|
||||||
|
ImageId: config.AlicloudSourceImage,
|
||||||
|
}
|
||||||
|
args.PageSize = 50
|
||||||
|
images, _, err := client.DescribeImages(args)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error querying alicloud image: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe markerplace image
|
||||||
|
args.ImageOwnerAlias = ecs.ImageOwnerMarketplace
|
||||||
|
imageMarkets, _, err := client.DescribeImages(args)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error querying alicloud marketplace image: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
if len(imageMarkets) > 0 {
|
||||||
|
images = append(images, imageMarkets...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(images) == 0 {
|
||||||
|
err := fmt.Errorf("No alicloud image was found matching filters: %v", config.AlicloudSourceImage)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Message(fmt.Sprintf("Found image ID: %s", images[0].ImageId))
|
||||||
|
|
||||||
|
state.Put("source_image", &images[0])
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepCheckAlicloudSourceImage) Cleanup(multistep.StateBag) {}
|
82
builder/alicloud/ecs/step_config_eip.go
Normal file
82
builder/alicloud/ecs/step_config_eip.go
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepConfigAlicloudEIP struct {
|
||||||
|
AssociatePublicIpAddress bool
|
||||||
|
RegionId string
|
||||||
|
InternetChargeType string
|
||||||
|
InternetMaxBandwidthOut int
|
||||||
|
allocatedId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
ui.Say("Allocating eip")
|
||||||
|
ipaddress, allocateId, err := client.AllocateEipAddress(&ecs.AllocateEipAddressArgs{
|
||||||
|
RegionId: common.Region(s.RegionId), InternetChargeType: common.InternetChargeType(s.InternetChargeType),
|
||||||
|
Bandwidth: s.InternetMaxBandwidthOut,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
s.allocatedId = allocateId
|
||||||
|
if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
|
||||||
|
ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = client.AssociateEipAddress(allocateId, instance.InstanceId); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Error binding eip: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
|
||||||
|
ecs.EipStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Error associating eip: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
ui.Say(fmt.Sprintf("Allocated eip %s", ipaddress))
|
||||||
|
state.Put("ipaddress", ipaddress)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudEIP) Cleanup(state multistep.StateBag) {
|
||||||
|
if len(s.allocatedId) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
message(state, "EIP")
|
||||||
|
|
||||||
|
if err := client.UnassociateEipAddress(s.allocatedId, instance.InstanceId); err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Failed to unassociate eip."))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := client.WaitForEip(common.Region(s.RegionId), s.allocatedId, ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Timeout while unassociating eip."))
|
||||||
|
}
|
||||||
|
if err := client.ReleaseEipAddress(s.allocatedId); err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Failed to release eip."))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
136
builder/alicloud/ecs/step_config_key_pair.go
Normal file
136
builder/alicloud/ecs/step_config_key_pair.go
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/communicator"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepConfigAlicloudKeyPair struct {
|
||||||
|
Debug bool
|
||||||
|
Comm *communicator.Config
|
||||||
|
DebugKeyPath string
|
||||||
|
RegionId string
|
||||||
|
|
||||||
|
keyName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
if s.Comm.SSHPrivateKeyFile != "" {
|
||||||
|
ui.Say("Using existing SSH private key")
|
||||||
|
privateKeyBytes, err := ioutil.ReadFile(s.Comm.SSHPrivateKeyFile)
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", fmt.Errorf(
|
||||||
|
"Error loading configured private key file: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Comm.SSHPrivateKey = privateKeyBytes
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName == "" {
|
||||||
|
ui.Say("Using SSH Agent with key pair in source image")
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName != "" {
|
||||||
|
ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.Comm.SSHKeyPairName))
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Comm.SSHTemporaryKeyPairName == "" {
|
||||||
|
ui.Say("Not using temporary keypair")
|
||||||
|
s.Comm.SSHKeyPairName = ""
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
|
||||||
|
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.Comm.SSHTemporaryKeyPairName))
|
||||||
|
keyResp, err := client.CreateKeyPair(&ecs.CreateKeyPairArgs{
|
||||||
|
KeyPairName: s.Comm.SSHTemporaryKeyPairName,
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the keyname so we know to delete it later
|
||||||
|
s.keyName = s.Comm.SSHTemporaryKeyPairName
|
||||||
|
|
||||||
|
// Set some state data for use in future steps
|
||||||
|
s.Comm.SSHKeyPairName = s.keyName
|
||||||
|
s.Comm.SSHPrivateKey = []byte(keyResp.PrivateKeyBody)
|
||||||
|
|
||||||
|
// If we're in debug mode, output the private key to the working
|
||||||
|
// directory.
|
||||||
|
if s.Debug {
|
||||||
|
ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
|
||||||
|
f, err := os.Create(s.DebugKeyPath)
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// Write the key out
|
||||||
|
if _, err := f.Write([]byte(keyResp.PrivateKeyBody)); err != nil {
|
||||||
|
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chmod it so that it is SSH ready
|
||||||
|
if runtime.GOOS != "windows" {
|
||||||
|
if err := f.Chmod(0600); err != nil {
|
||||||
|
state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) {
|
||||||
|
// If no key name is set, then we never created it, so just return
|
||||||
|
// If we used an SSH private key file, do not go about deleting
|
||||||
|
// keypairs
|
||||||
|
if s.Comm.SSHPrivateKeyFile != "" || (s.Comm.SSHKeyPairName == "" && s.keyName == "") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
// Remove the keypair
|
||||||
|
ui.Say("Deleting temporary keypair...")
|
||||||
|
err := client.DeleteKeyPairs(&ecs.DeleteKeyPairsArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
KeyPairNames: "[\"" + s.keyName + "\"]",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
ui.Error(fmt.Sprintf(
|
||||||
|
"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also remove the physical key if we're debugging.
|
||||||
|
if s.Debug {
|
||||||
|
if err := os.Remove(s.DebugKeyPath); err != nil {
|
||||||
|
ui.Error(fmt.Sprintf(
|
||||||
|
"Error removing debug key '%s': %s", s.DebugKeyPath, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
36
builder/alicloud/ecs/step_config_public_ip.go
Normal file
36
builder/alicloud/ecs/step_config_public_ip.go
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepConfigAlicloudPublicIP struct {
|
||||||
|
publicIPAddress string
|
||||||
|
RegionId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudPublicIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
|
||||||
|
ipaddress, err := client.AllocatePublicIpAddress(instance.InstanceId)
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Error allocating public ip: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
s.publicIPAddress = ipaddress
|
||||||
|
ui.Say(fmt.Sprintf("Allocated public ip address %s.", ipaddress))
|
||||||
|
state.Put("ipaddress", ipaddress)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudPublicIP) Cleanup(state multistep.StateBag) {
|
||||||
|
|
||||||
|
}
|
138
builder/alicloud/ecs/step_config_security_group.go
Normal file
138
builder/alicloud/ecs/step_config_security_group.go
Normal file
@ -0,0 +1,138 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepConfigAlicloudSecurityGroup struct {
|
||||||
|
SecurityGroupId string
|
||||||
|
SecurityGroupName string
|
||||||
|
Description string
|
||||||
|
VpcId string
|
||||||
|
RegionId string
|
||||||
|
isCreate bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudSecurityGroup) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
networkType := state.Get("networktype").(InstanceNetWork)
|
||||||
|
|
||||||
|
var securityGroupItems []ecs.SecurityGroupItemType
|
||||||
|
var err error
|
||||||
|
if len(s.SecurityGroupId) != 0 {
|
||||||
|
if networkType == VpcNet {
|
||||||
|
vpcId := state.Get("vpcid").(string)
|
||||||
|
securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
|
||||||
|
VpcId: vpcId,
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Failed querying security group: %s", err))
|
||||||
|
state.Put("error", err)
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
for _, securityGroupItem := range securityGroupItems {
|
||||||
|
if securityGroupItem.SecurityGroupId == s.SecurityGroupId {
|
||||||
|
state.Put("securitygroupid", s.SecurityGroupId)
|
||||||
|
s.isCreate = false
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.isCreate = false
|
||||||
|
message := fmt.Sprintf("The specified security group {%s} doesn't exist.", s.SecurityGroupId)
|
||||||
|
state.Put("error", errors.New(message))
|
||||||
|
ui.Say(message)
|
||||||
|
return multistep.ActionHalt
|
||||||
|
|
||||||
|
}
|
||||||
|
var securityGroupId string
|
||||||
|
ui.Say("Creating security groups...")
|
||||||
|
if networkType == VpcNet {
|
||||||
|
vpcId := state.Get("vpcid").(string)
|
||||||
|
securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
SecurityGroupName: s.SecurityGroupName,
|
||||||
|
VpcId: vpcId,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
SecurityGroupName: s.SecurityGroupName,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Failed creating security group %s.", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
state.Put("securitygroupid", securityGroupId)
|
||||||
|
s.isCreate = true
|
||||||
|
s.SecurityGroupId = securityGroupId
|
||||||
|
err = client.AuthorizeSecurityGroupEgress(&ecs.AuthorizeSecurityGroupEgressArgs{
|
||||||
|
SecurityGroupId: securityGroupId,
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
IpProtocol: ecs.IpProtocolAll,
|
||||||
|
PortRange: "-1/-1",
|
||||||
|
NicType: ecs.NicTypeInternet,
|
||||||
|
DestCidrIp: "0.0.0.0/0", //The input parameter "DestGroupId" or "DestCidrIp" cannot be both blank.
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
err = client.AuthorizeSecurityGroup(&ecs.AuthorizeSecurityGroupArgs{
|
||||||
|
SecurityGroupId: securityGroupId,
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
IpProtocol: ecs.IpProtocolAll,
|
||||||
|
PortRange: "-1/-1",
|
||||||
|
NicType: ecs.NicTypeInternet,
|
||||||
|
SourceCidrIp: "0.0.0.0/0", //The input parameter "SourceGroupId" or "SourceCidrIp" cannot be both blank.
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepConfigAlicloudSecurityGroup) Cleanup(state multistep.StateBag) {
|
||||||
|
if !s.isCreate {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
message(state, "security group")
|
||||||
|
timeoutPoint := time.Now().Add(120 * time.Second)
|
||||||
|
for {
|
||||||
|
if err := client.DeleteSecurityGroup(common.Region(s.RegionId), s.SecurityGroupId); err != nil {
|
||||||
|
e, _ := err.(*common.Error)
|
||||||
|
if e.Code == "DependencyViolation" && time.Now().Before(timeoutPoint) {
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
98
builder/alicloud/ecs/step_config_vpc.go
Normal file
98
builder/alicloud/ecs/step_config_vpc.go
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
package ecs

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepConfigAlicloudVPC ensures a VPC exists for the build: it either
// validates a user-supplied VPC id or creates a temporary VPC that is
// destroyed again in Cleanup.
type stepConfigAlicloudVPC struct {
	VpcId     string
	CidrBlock string // 192.168.0.0/16 or 172.16.0.0/16 (default)
	VpcName   string
	isCreate  bool // true when Run created the VPC, so Cleanup must delete it
}

// Run validates s.VpcId when one was supplied, otherwise creates a new VPC
// and waits for it to become available. The resolved id is published under
// the "vpcid" state key for later steps.
func (s *stepConfigAlicloudVPC) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(Config)
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	if len(s.VpcId) != 0 {
		vpcs, _, err := client.DescribeVpcs(&ecs.DescribeVpcsArgs{
			VpcId:    s.VpcId,
			RegionId: common.Region(config.AlicloudRegion),
		})
		if err != nil {
			ui.Say(fmt.Sprintf("Failed querying vpcs: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		if len(vpcs) > 0 {
			vpc := vpcs[0]
			state.Put("vpcid", vpc.VpcId)
			s.isCreate = false
			return multistep.ActionContinue
		}
		message := fmt.Sprintf("The specified vpc {%s} doesn't exist.", s.VpcId)
		state.Put("error", errors.New(message))
		ui.Say(message)
		return multistep.ActionHalt
	}

	ui.Say("Creating vpc")
	vpc, err := client.CreateVpc(&ecs.CreateVpcArgs{
		RegionId:  common.Region(config.AlicloudRegion),
		CidrBlock: s.CidrBlock,
		VpcName:   s.VpcName,
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed creating vpc: %s", err))
		return multistep.ActionHalt
	}

	err = client.WaitForVpcAvailable(common.Region(config.AlicloudRegion), vpc.VpcId, ALICLOUD_DEFAULT_SHORT_TIMEOUT)
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed waiting for vpc to become available: %s", err))
		return multistep.ActionHalt
	}

	state.Put("vpcid", vpc.VpcId)
	s.isCreate = true
	s.VpcId = vpc.VpcId
	return multistep.ActionContinue
}

// Cleanup deletes the VPC if this step created it, retrying for up to 60s
// while dependent resources (instances, route entries, vswitches, security
// groups) are still being torn down.
func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) {
	if !s.isCreate {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	message(state, "VPC")
	timeoutPoint := time.Now().Add(60 * time.Second)
	for {
		if err := client.DeleteVpc(s.VpcId); err != nil {
			// BUGFIX: check the type assertion; the previous blind
			// `e, _ := err.(*common.Error)` nil-dereferenced on other
			// error types.
			if e, ok := err.(*common.Error); ok &&
				(e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" ||
					e.Code == "DependencyViolation.VSwitch" ||
					e.Code == "DependencyViolation.SecurityGroup" ||
					e.Code == "Forbbiden") && time.Now().Before(timeoutPoint) {
				time.Sleep(1 * time.Second)
				continue
			}
			ui.Error(fmt.Sprintf("Error deleting vpc, it may still be around: %s", err))
			return
		}
		break
	}
}
|
149
builder/alicloud/ecs/step_config_vswitch.go
Normal file
149
builder/alicloud/ecs/step_config_vswitch.go
Normal file
@ -0,0 +1,149 @@
|
|||||||
|
package ecs

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepConfigAlicloudVSwitch ensures a vswitch exists in the build VPC: it
// either validates a user-supplied vswitch id or creates a temporary one
// (picking a zone that supports both vswitches and the configured instance
// type) which is deleted again in Cleanup.
type stepConfigAlicloudVSwitch struct {
	VSwitchId   string
	ZoneId      string
	isCreate    bool // true when Run created the vswitch, so Cleanup must delete it
	CidrBlock   string
	VSwitchName string
}

// Run resolves or creates the vswitch and publishes its id under the
// "vswitchid" state key. Requires "vpcid" to already be in the state bag.
func (s *stepConfigAlicloudVSwitch) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	vpcId := state.Get("vpcid").(string)
	config := state.Get("config").(Config)

	if len(s.VSwitchId) != 0 {
		vswitchs, _, err := client.DescribeVSwitches(&ecs.DescribeVSwitchesArgs{
			VpcId:     vpcId,
			VSwitchId: s.VSwitchId,
			ZoneId:    s.ZoneId,
		})
		if err != nil {
			ui.Say(fmt.Sprintf("Failed querying vswitch: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		if len(vswitchs) > 0 {
			vswitch := vswitchs[0]
			state.Put("vswitchid", vswitch.VSwitchId)
			s.isCreate = false
			return multistep.ActionContinue
		}
		message := fmt.Sprintf("The specified vswitch {%s} doesn't exist.", s.VSwitchId)
		state.Put("error", errors.New(message))
		ui.Say(message)
		return multistep.ActionHalt
	}

	if s.ZoneId == "" {
		// No zone configured: pick one that supports vswitch creation and
		// offers the requested instance type; collect the available types
		// for the error message if nothing matches.
		zones, err := client.DescribeZones(common.Region(config.AlicloudRegion))
		if err != nil {
			ui.Say(fmt.Sprintf("Query for available zones failed: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		var instanceTypes []string
		for _, zone := range zones {
			isVSwitchSupported := false
			for _, resourceType := range zone.AvailableResourceCreation.ResourceTypes {
				if resourceType == ecs.ResourceTypeVSwitch {
					isVSwitchSupported = true
				}
			}
			if isVSwitchSupported {
				for _, instanceType := range zone.AvailableInstanceTypes.InstanceTypes {
					if instanceType == config.InstanceType {
						s.ZoneId = zone.ZoneId
						break
					}
					instanceTypes = append(instanceTypes, instanceType)
				}
			}
		}

		if s.ZoneId == "" {
			if len(instanceTypes) > 0 {
				ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
					"\n You can either change the instance to one of following: %v \n"+
					"or choose another region.", config.InstanceType, instanceTypes))

				state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
					"\n You can either change the instance to one of following: %v \n"+
					"or choose another region.", config.InstanceType, instanceTypes))
				return multistep.ActionHalt
			} else {
				ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
					"\n You can change to other regions.", config.InstanceType))

				state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
					"\n You can change to other regions.", config.InstanceType))
				return multistep.ActionHalt
			}
		}
	}
	if config.CidrBlock == "" {
		s.CidrBlock = "172.16.0.0/24" // use the default CidrBlock
	}
	ui.Say("Creating vswitch...")
	vswitchId, err := client.CreateVSwitch(&ecs.CreateVSwitchArgs{
		CidrBlock:   s.CidrBlock,
		ZoneId:      s.ZoneId,
		VpcId:       vpcId,
		VSwitchName: s.VSwitchName,
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Create vswitch failed %v", err))
		return multistep.ActionHalt
	}
	// BUGFIX: wait on the id returned by CreateVSwitch; s.VSwitchId is still
	// empty at this point (it is only assigned below), so the previous code
	// waited for a vswitch with an empty id.
	if err := client.WaitForVSwitchAvailable(vpcId, vswitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
		state.Put("error", err)
		ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become available: %v", err))
		return multistep.ActionHalt
	}
	state.Put("vswitchid", vswitchId)
	s.isCreate = true
	s.VSwitchId = vswitchId
	return multistep.ActionContinue
}

// Cleanup deletes the vswitch if this step created it, retrying for up to
// 10s while transient dependency/status errors persist.
func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
	if !s.isCreate {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	message(state, "vSwitch")
	timeoutPoint := time.Now().Add(10 * time.Second)
	for {
		if err := client.DeleteVSwitch(s.VSwitchId); err != nil {
			// BUGFIX: check the type assertion; the previous blind
			// `e, _ := err.(*common.Error)` nil-dereferenced on other
			// error types.
			if e, ok := err.(*common.Error); ok &&
				(e.Code == "IncorrectVSwitchStatus" || e.Code == "DependencyViolation" ||
					e.Code == "DependencyViolation.HaVip" ||
					e.Code == "IncorrectRouteEntryStatus") && time.Now().Before(timeoutPoint) {
				time.Sleep(1 * time.Second)
				continue
			}
			ui.Error(fmt.Sprintf("Error deleting vswitch, it may still be around: %s", err))
			return
		}
		break
	}
}
|
95
builder/alicloud/ecs/step_create_image.go
Normal file
95
builder/alicloud/ecs/step_create_image.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepCreateAlicloudImage creates the custom image from the build instance
// and records the resulting image for later steps. On cancellation/error the
// created image is deleted again in Cleanup.
type stepCreateAlicloudImage struct {
	image *ecs.ImageType // the created image; nil until Run succeeds
}

// Run creates the image, waits until it is ready, re-queries it, and
// publishes "alicloudimage" (the id) and "alicloudimages" (region -> id map)
// in the state bag.
func (s *stepCreateAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(Config)
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	// Create the alicloud image
	ui.Say(fmt.Sprintf("Creating image: %s", config.AlicloudImageName))
	var imageId string
	var err error

	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	imageId, err = client.CreateImage(&ecs.CreateImageArgs{
		RegionId:     common.Region(config.AlicloudRegion),
		InstanceId:   instance.InstanceId,
		ImageName:    config.AlicloudImageName,
		ImageVersion: config.AlicloudImageVersion,
		Description:  config.AlicloudImageDescription})

	if err != nil {
		err := fmt.Errorf("Error creating image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	err = client.WaitForImageReady(common.Region(config.AlicloudRegion),
		imageId, ALICLOUD_DEFAULT_LONG_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Timeout waiting for image to be created: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
		RegionId: common.Region(config.AlicloudRegion),
		ImageId:  imageId})
	if err != nil {
		err := fmt.Errorf("Error querying created image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(images) == 0 {
		// BUGFIX: err is nil on this path; report the image id we failed to
		// find rather than formatting a nil error into the message.
		err := fmt.Errorf("Unable to find created image: %s", imageId)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	s.image = &images[0]

	state.Put("alicloudimage", imageId)
	alicloudImages := make(map[string]string)
	alicloudImages[config.AlicloudRegion] = images[0].ImageId
	state.Put("alicloudimages", alicloudImages)

	return multistep.ActionContinue
}

// Cleanup deletes the created image, but only when the build was cancelled
// or halted — on success the image is the build artifact and must survive.
func (s *stepCreateAlicloudImage) Cleanup(state multistep.StateBag) {
	if s.image == nil {
		return
	}
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if !cancelled && !halted {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	config := state.Get("config").(Config)

	ui.Say("Deleting the image because of cancellation or error...")
	if err := client.DeleteImage(common.Region(config.AlicloudRegion), s.image.ImageId); err != nil {
		ui.Error(fmt.Sprintf("Error deleting image, it may still be around: %s", err))
		return
	}
}
|
164
builder/alicloud/ecs/step_create_instance.go
Normal file
164
builder/alicloud/ecs/step_create_instance.go
Normal file
@ -0,0 +1,164 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepCreateAlicloudInstance creates (but does not start) the build
// instance, in either VPC or classic network mode, and publishes it under
// the "instance" state key.
type stepCreateAlicloudInstance struct {
	IOOptimized             bool
	InstanceType            string
	UserData                string
	UserDataFile            string
	instanceId              string
	RegionId                string
	InternetChargeType      string
	InternetMaxBandwidthOut int
	InstanceName            string
	ZoneId                  string
	instance                *ecs.InstanceAttributesType // set once the instance exists; used by Cleanup
}

// Run creates the instance and waits for it to reach the Stopped state
// (freshly created instances start out stopped).
func (s *stepCreateAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)
	source_image := state.Get("source_image").(*ecs.ImageType)
	network_type := state.Get("networktype").(InstanceNetWork)
	securityGroupId := state.Get("securitygroupid").(string)
	var instanceId string
	var err error

	ioOptimized := ecs.IoOptimizedNone
	if s.IOOptimized {
		ioOptimized = ecs.IoOptimizedOptimized
	}
	// Fall back to the WinRM password so Windows builds get the same
	// bootstrap credential.
	password := config.Comm.SSHPassword
	if password == "" && config.Comm.WinRMPassword != "" {
		password = config.Comm.WinRMPassword
	}
	ui.Say("Creating instance.")
	if network_type == VpcNet {
		userData, err := s.getUserData(state)
		if err != nil {
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
		vswitchId := state.Get("vswitchid").(string)
		instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
			RegionId:                common.Region(s.RegionId),
			ImageId:                 source_image.ImageId,
			InstanceType:            s.InstanceType,
			InternetChargeType:      common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
			InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
			UserData:                userData,
			IoOptimized:             ioOptimized,
			VSwitchId:               vswitchId,
			SecurityGroupId:         securityGroupId,
			InstanceName:            s.InstanceName,
			Password:                password,
			ZoneId:                  s.ZoneId,
			DataDisk:                diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
		})
		if err != nil {
			err := fmt.Errorf("Error creating instance: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	} else {
		// BUGFIX: "PayByTraffic" is an internet charge type, not an instance
		// type; the previous code defaulted s.InstanceType to it, producing
		// an invalid CreateInstance request whenever the classic-network
		// default applied.
		if s.InternetChargeType == "" {
			s.InternetChargeType = "PayByTraffic"
		}
		if s.InternetMaxBandwidthOut == 0 {
			s.InternetMaxBandwidthOut = 5
		}
		instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
			RegionId:                common.Region(s.RegionId),
			ImageId:                 source_image.ImageId,
			InstanceType:            s.InstanceType,
			InternetChargeType:      common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
			InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
			IoOptimized:             ioOptimized,
			SecurityGroupId:         securityGroupId,
			InstanceName:            s.InstanceName,
			Password:                password,
			ZoneId:                  s.ZoneId,
			DataDisk:                diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
		})
		if err != nil {
			err := fmt.Errorf("Error creating instance: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}
	err = client.WaitForInstance(instanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Error creating instance: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	instance, err := client.DescribeInstanceAttribute(instanceId)
	if err != nil {
		ui.Say(err.Error())
		return multistep.ActionHalt
	}
	s.instance = instance
	state.Put("instance", instance)

	return multistep.ActionContinue
}

// Cleanup deletes the instance if one was created.
func (s *stepCreateAlicloudInstance) Cleanup(state multistep.StateBag) {
	if s.instance == nil {
		return
	}
	message(state, "instance")
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	err := client.DeleteInstance(s.instance.InstanceId)
	if err != nil {
		ui.Say(fmt.Sprintf("Failed to clean up instance %s: %v", s.instance.InstanceId, err.Error()))
	}
}

// getUserData returns the user data to pass to the instance, preferring the
// contents of UserDataFile over the inline UserData string.
func (s *stepCreateAlicloudInstance) getUserData(state multistep.StateBag) (string, error) {
	userData := s.UserData
	if s.UserDataFile != "" {
		data, err := ioutil.ReadFile(s.UserDataFile)
		if err != nil {
			return "", err
		}
		userData = string(data)
	}
	// BUGFIX: log.Printf(userData) treated user data as a format string,
	// which panics/garbles output on any '%' in the data.
	log.Print(userData)
	return userData, nil
}

// diskDeviceToDiskType converts the builder's disk-mapping config into the
// SDK's DataDiskType slice.
func diskDeviceToDiskType(diskDevices []AlicloudDiskDevice) []ecs.DataDiskType {
	// BUGFIX: make with length 0 and capacity len(diskDevices); the previous
	// make([]T, len(...)) followed by append produced len(diskDevices)
	// zero-valued leading entries.
	result := make([]ecs.DataDiskType, 0, len(diskDevices))
	for _, diskDevice := range diskDevices {
		result = append(result, ecs.DataDiskType{
			DiskName:           diskDevice.DiskName,
			Category:           ecs.DiskCategory(diskDevice.DiskCategory),
			Size:               diskDevice.DiskSize,
			SnapshotId:         diskDevice.SnapshotId,
			Description:        diskDevice.Description,
			DeleteWithInstance: diskDevice.DeleteWithInstance,
			Device:             diskDevice.Device,
		})
	}
	return result
}
|
64
builder/alicloud/ecs/step_delete_images_snapshots.go
Normal file
64
builder/alicloud/ecs/step_delete_images_snapshots.go
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"
	"log"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepDeleteAlicloudImageSnapshots implements the force-delete behavior:
// when enabled, any pre-existing image with the target name (and optionally
// its snapshots) is removed before the build proceeds.
type stepDeleteAlicloudImageSnapshots struct {
	AlicloudImageForceDelete          bool
	AlicloudImageForceDeleteSnapshots bool
	AlicloudImageName                 string
}

// Run deletes conflicting self-owned images (and their snapshots when
// AlicloudImageForceDeleteSnapshots is set). It is a no-op unless
// AlicloudImageForceDelete is enabled.
func (s *stepDeleteAlicloudImageSnapshots) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	config := state.Get("config").(Config)
	ui.Say("Deleting image snapshots.")
	// Check for force delete
	if s.AlicloudImageForceDelete {
		images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
			RegionId:  common.Region(config.AlicloudRegion),
			ImageName: s.AlicloudImageName,
		})
		// BUGFIX: the DescribeImages error was silently ignored, so an API
		// failure looked like "no conflicting images".
		if err != nil {
			err := fmt.Errorf("Failed to describe images: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
		if len(images) < 1 {
			return multistep.ActionContinue
		}
		for _, image := range images {
			// Only self-owned (customized) images can be deleted.
			if image.ImageOwnerAlias != string(ecs.ImageOwnerSelf) {
				log.Printf("You can only delete instances based on customized images %s ", image.ImageId)
				continue
			}
			err = client.DeleteImage(common.Region(config.AlicloudRegion), image.ImageId)
			if err != nil {
				err := fmt.Errorf("Failed to delete image: %s", err)
				state.Put("error", err)
				ui.Error(err.Error())
				return multistep.ActionHalt
			}
			if s.AlicloudImageForceDeleteSnapshots {
				for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
					if err := client.DeleteSnapshot(diskDevice.SnapshotId); err != nil {
						err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
						state.Put("error", err)
						ui.Error(err.Error())
						return multistep.ActionHalt
					}
				}
			}
		}
	}
	return multistep.ActionContinue
}

// Cleanup has nothing to undo: this step only removes pre-existing state.
func (s *stepDeleteAlicloudImageSnapshots) Cleanup(state multistep.StateBag) {
}
|
72
builder/alicloud/ecs/step_mount_disk.go
Normal file
72
builder/alicloud/ecs/step_mount_disk.go
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepMountAlicloudDisk attaches any still-available data disks of the
// build instance and waits for them to reach the in-use state.
type stepMountAlicloudDisk struct {
}

// Run attaches every available disk belonging to the instance, using the
// device path from the matching disk-mapping config when the disk itself
// does not carry one, then waits until all disks report DiskStatusInUse.
// It is a no-op when no disk mappings are configured.
func (s *stepMountAlicloudDisk) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	alicloudDiskDevices := config.ECSImagesDiskMappings
	if len(config.ECSImagesDiskMappings) == 0 {
		return multistep.ActionContinue
	}
	ui.Say("Mounting disks.")
	disks, _, err := client.DescribeDisks(&ecs.DescribeDisksArgs{InstanceId: instance.InstanceId,
		RegionId: instance.RegionId})
	if err != nil {
		wrapped := fmt.Errorf("Error querying disks: %s", err)
		state.Put("error", wrapped)
		ui.Error(wrapped.Error())
		return multistep.ActionHalt
	}
	// First pass: attach everything that is currently detached.
	for _, d := range disks {
		if d.Status != ecs.DiskStatusAvailable {
			continue
		}
		attachErr := client.AttachDisk(&ecs.AttachDiskArgs{DiskId: d.DiskId,
			InstanceId: instance.InstanceId,
			Device:     getDevice(&d, alicloudDiskDevices),
		})
		if attachErr != nil {
			wrapped := fmt.Errorf("Error mounting disks: %s", attachErr)
			state.Put("error", wrapped)
			ui.Error(wrapped.Error())
			return multistep.ActionHalt
		}
	}
	// Second pass: block until every disk is reported as in use.
	for _, d := range disks {
		waitErr := client.WaitForDisk(instance.RegionId, d.DiskId, ecs.DiskStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT)
		if waitErr != nil {
			wrapped := fmt.Errorf("Timeout waiting for mount: %s", waitErr)
			state.Put("error", wrapped)
			ui.Error(wrapped.Error())
			return multistep.ActionHalt
		}
	}
	ui.Say("Finished mounting disks.")
	return multistep.ActionContinue
}

// Cleanup is a no-op: attached disks are torn down with the instance.
func (s *stepMountAlicloudDisk) Cleanup(state multistep.StateBag) {

}

// getDevice resolves the device path for a disk: the disk's own device wins;
// otherwise the first disk-mapping config matching by name or snapshot id is
// used; otherwise the empty string lets the API pick.
func getDevice(disk *ecs.DiskItemType, diskDevices []AlicloudDiskDevice) string {
	if disk.Device != "" {
		return disk.Device
	}
	for _, mapping := range diskDevices {
		if mapping.DiskName == disk.DiskName || mapping.SnapshotId == disk.SourceSnapshotId {
			return mapping.Device
		}
	}
	return ""
}
|
49
builder/alicloud/ecs/step_pre_validate.go
Normal file
49
builder/alicloud/ecs/step_pre_validate.go
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepPreValidate fails the build early when an image with the destination
// name already exists, unless force-delete is enabled.
type stepPreValidate struct {
	AlicloudDestImageName string
	ForceDelete           bool
}

// Run checks the target region for a name collision with the destination
// image name and halts on any match. Skipped entirely when ForceDelete is
// set, since a conflicting image would be removed later anyway.
func (s *stepPreValidate) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	if s.ForceDelete {
		ui.Say("Force delete flag found, skipping prevalidating image name.")
		return multistep.ActionContinue
	}

	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui.Say("Prevalidating image name...")
	images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
		ImageName: s.AlicloudDestImageName,
		RegionId:  common.Region(config.AlicloudRegion)})

	if err != nil {
		wrapped := fmt.Errorf("Error querying alicloud image: %s", err)
		state.Put("error", wrapped)
		ui.Error(wrapped.Error())
		return multistep.ActionHalt
	}

	if len(images) > 0 {
		conflict := fmt.Errorf("Error: name conflicts with an existing alicloud image: %s", images[0].ImageId)
		state.Put("error", conflict)
		ui.Error(conflict.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

// Cleanup is a no-op: validation creates no resources.
func (s *stepPreValidate) Cleanup(multistep.StateBag) {}
72
builder/alicloud/ecs/step_region_copy_image.go
Normal file
72
builder/alicloud/ecs/step_region_copy_image.go
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepRegionCopyAlicloudImage copies the built image to each configured
// destination region, optionally under per-region names, and records the
// per-region image ids in the "alicloudimages" state map.
type stepRegionCopyAlicloudImage struct {
	AlicloudImageDestinationRegions []string
	AlicloudImageDestinationNames   []string
	RegionId                        string
}

// Run copies the image to each destination region (skipping the source
// region itself). Destination names are matched to regions by index; regions
// beyond the name list get an API-assigned name.
func (s *stepRegionCopyAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	if len(s.AlicloudImageDestinationRegions) == 0 {
		return multistep.ActionContinue
	}
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	imageId := state.Get("alicloudimage").(string)
	alicloudImages := state.Get("alicloudimages").(map[string]string)
	region := common.Region(s.RegionId)

	numberOfName := len(s.AlicloudImageDestinationNames)
	for index, destinationRegion := range s.AlicloudImageDestinationRegions {
		if destinationRegion == s.RegionId {
			continue
		}
		ecsImageName := ""
		if numberOfName > 0 && index < numberOfName {
			ecsImageName = s.AlicloudImageDestinationNames[index]
		}
		imageId, err := client.CopyImage(
			&ecs.CopyImageArgs{
				RegionId:             region,
				ImageId:              imageId,
				DestinationRegionId:  common.Region(destinationRegion),
				DestinationImageName: ecsImageName,
			})
		if err != nil {
			state.Put("error", err)
			ui.Say(fmt.Sprintf("Error copying images: %s", err))
			return multistep.ActionHalt
		}
		alicloudImages[destinationRegion] = imageId
	}
	return multistep.ActionContinue
}

// Cleanup cancels any in-flight copies when the build was cancelled or
// halted; on success the copied images are build artifacts and are kept.
func (s *stepRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if cancelled || halted {
		ui := state.Get("ui").(packer.Ui)
		client := state.Get("client").(*ecs.Client)
		alicloudImages := state.Get("alicloudimages").(map[string]string)
		// FIX: drop the no-op fmt.Sprintf wrapper around a constant string
		// (go vet printf finding); output is unchanged.
		ui.Say("Stopping copy image because cancellation or error...")
		for copiedRegionId, copiedImageId := range alicloudImages {
			if copiedRegionId == s.RegionId {
				continue
			}
			if err := client.CancelCopyImage(common.Region(copiedRegionId), copiedImageId); err != nil {
				ui.Say(fmt.Sprintf("Error cancelling copy image: %v", err))
			}
		}
	}
}
|
57
builder/alicloud/ecs/step_run_instance.go
Normal file
57
builder/alicloud/ecs/step_run_instance.go
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// stepRunAlicloudInstance starts the previously created build instance and
// waits for it to reach the Running state.
type stepRunAlicloudInstance struct {
}

// Run starts the instance from the "instance" state key and blocks until it
// is Running (or the timeout elapses).
func (s *stepRunAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)

	err := client.StartInstance(instance.InstanceId)
	if err != nil {
		err := fmt.Errorf("Error starting instance: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	ui.Say("Starting instance.")
	err = client.WaitForInstance(instance.InstanceId, ecs.Running, ALICLOUD_DEFAULT_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Timeout waiting for instance to start: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

// Cleanup stops the instance again when the build was cancelled or halted,
// so the (separate) instance-deletion cleanup can proceed.
func (s *stepRunAlicloudInstance) Cleanup(state multistep.StateBag) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if cancelled || halted {
		ui := state.Get("ui").(packer.Ui)
		client := state.Get("client").(*ecs.Client)
		instance := state.Get("instance").(*ecs.InstanceAttributesType)
		// BUGFIX: the DescribeInstanceAttribute error was discarded and the
		// possibly-nil result dereferenced below; on error, skip the stop
		// attempt instead of panicking during cleanup.
		instanceAttribute, err := client.DescribeInstanceAttribute(instance.InstanceId)
		if err != nil {
			ui.Say(fmt.Sprintf("Error describing instance %s, it may still be running: %s", instance.InstanceId, err))
			return
		}
		if instanceAttribute.Status == ecs.Starting || instanceAttribute.Status == ecs.Running {
			if err := client.StopInstance(instance.InstanceId, true); err != nil {
				ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
				return
			}
			if err := client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
				ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
			}
		}
	}
}
|
61
builder/alicloud/ecs/step_share_image.go
Normal file
61
builder/alicloud/ecs/step_share_image.go
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepShareAlicloudImage struct {
|
||||||
|
AlicloudImageShareAccounts []string
|
||||||
|
AlicloudImageUNShareAccounts []string
|
||||||
|
RegionId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepShareAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
alicloudImages := state.Get("alicloudimages").(map[string]string)
|
||||||
|
for copiedRegion, copiedImageId := range alicloudImages {
|
||||||
|
err := client.ModifyImageSharePermission(
|
||||||
|
&ecs.ModifyImageSharePermissionArgs{
|
||||||
|
RegionId: common.Region(copiedRegion),
|
||||||
|
ImageId: copiedImageId,
|
||||||
|
AddAccount: s.AlicloudImageShareAccounts,
|
||||||
|
RemoveAccount: s.AlicloudImageUNShareAccounts,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Say(fmt.Sprintf("Failed modifying image share permissions: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepShareAlicloudImage) Cleanup(state multistep.StateBag) {
|
||||||
|
_, cancelled := state.GetOk(multistep.StateCancelled)
|
||||||
|
_, halted := state.GetOk(multistep.StateHalted)
|
||||||
|
if cancelled || halted {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
alicloudImages := state.Get("alicloudimages").(map[string]string)
|
||||||
|
ui.Say("Restoring image share permission because cancellations or error...")
|
||||||
|
for copiedRegion, copiedImageId := range alicloudImages {
|
||||||
|
err := client.ModifyImageSharePermission(
|
||||||
|
&ecs.ModifyImageSharePermissionArgs{
|
||||||
|
RegionId: common.Region(copiedRegion),
|
||||||
|
ImageId: copiedImageId,
|
||||||
|
AddAccount: s.AlicloudImageUNShareAccounts,
|
||||||
|
RemoveAccount: s.AlicloudImageShareAccounts,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Restoring image share permission failed: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
44
builder/alicloud/ecs/step_stop_instance.go
Normal file
44
builder/alicloud/ecs/step_stop_instance.go
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepStopAlicloudInstance struct {
|
||||||
|
ForceStop bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepStopAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
instance := state.Get("instance").(*ecs.InstanceAttributesType)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
err := client.StopInstance(instance.InstanceId, s.ForceStop)
|
||||||
|
if err != nil {
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error stopping alicloud instance: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error waiting for alicloud instance to stop: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepStopAlicloudInstance) Cleanup(multistep.StateBag) {
|
||||||
|
// No cleanup...
|
||||||
|
}
|
326
builder/amazon/chroot/builder.go
Normal file
326
builder/amazon/chroot/builder.go
Normal file
@ -0,0 +1,326 @@
|
|||||||
|
// The chroot package is able to create an Amazon AMI without requiring
|
||||||
|
// the launch of a new instance for every build. It does this by attaching
|
||||||
|
// and mounting the root volume of another AMI and chrooting into that
|
||||||
|
// directory. It then creates an AMI from that attached drive.
|
||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||||
|
"github.com/hashicorp/packer/common"
|
||||||
|
"github.com/hashicorp/packer/helper/config"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The unique ID for this builder
const BuilderId = "mitchellh.amazon.chroot"

// Config is the configuration that is chained through the steps and
// settable from the template.
type Config struct {
	common.PackerConfig       `mapstructure:",squash"`
	awscommon.AMIBlockDevices `mapstructure:",squash"`
	awscommon.AMIConfig       `mapstructure:",squash"`
	awscommon.AccessConfig    `mapstructure:",squash"`

	// ChrootMounts lists the special filesystems to mount into the chroot;
	// each entry is a three-element [type, device, mountpoint] tuple.
	ChrootMounts [][]string `mapstructure:"chroot_mounts"`
	// CommandWrapper is a template wrapped around every shell command run
	// by the builder (defaulted to "{{.Command}}" in Prepare).
	CommandWrapper string `mapstructure:"command_wrapper"`
	// CopyFiles are host files copied into the chroot before provisioning
	// (defaults to /etc/resolv.conf unless from_scratch is set).
	CopyFiles []string `mapstructure:"copy_files"`
	// DevicePath / NVMEDevicePath name the block device to attach the
	// root volume to.
	DevicePath     string `mapstructure:"device_path"`
	NVMEDevicePath string `mapstructure:"nvme_device_path"`
	// FromScratch builds an image without a source AMI; several other
	// fields become required when it is set (see Prepare).
	FromScratch       bool     `mapstructure:"from_scratch"`
	MountOptions      []string `mapstructure:"mount_options"`
	MountPartition    string   `mapstructure:"mount_partition"`
	MountPath         string   `mapstructure:"mount_path"`
	PostMountCommands []string `mapstructure:"post_mount_commands"`
	PreMountCommands  []string `mapstructure:"pre_mount_commands"`
	RootDeviceName    string   `mapstructure:"root_device_name"`
	RootVolumeSize    int64    `mapstructure:"root_volume_size"`
	// SourceAmi / SourceAmiFilter select the AMI whose root volume is
	// mounted and chrooted into.
	SourceAmi       string                     `mapstructure:"source_ami"`
	SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter"`
	RootVolumeTags  awscommon.TagMap           `mapstructure:"root_volume_tags"`

	// ctx carries the interpolation context used when rendering templates.
	ctx interpolate.Context
}
|
||||||
|
|
||||||
|
// wrappedCommandTemplate is the template data handed to the
// command_wrapper template; Command is the raw command being wrapped.
type wrappedCommandTemplate struct {
	Command string
}

// Builder implements packer.Builder for the amazon-chroot builder.
type Builder struct {
	// config holds the decoded, validated template configuration.
	config Config
	// runner drives the multistep sequence; kept so Cancel can stop it.
	runner multistep.Runner
}
|
||||||
|
|
||||||
|
// Prepare decodes the raw template configuration into b.config, applies
// defaults, and validates it. It returns any warnings plus an error when
// validation fails. The signature is fixed by the packer.Builder interface.
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
	b.config.ctx.Funcs = awscommon.TemplateFuncs
	// Fields in Exclude are rendered later (per-step), not at decode time.
	err := config.Decode(&b.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &b.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"ami_description",
				"snapshot_tags",
				"tags",
				"root_volume_tags",
				"command_wrapper",
				"post_mount_commands",
				"pre_mount_commands",
				"mount_path",
			},
		},
	}, raws...)
	if err != nil {
		return nil, err
	}

	// `packer build -force` implies deregistering a pre-existing AMI.
	if b.config.PackerConfig.PackerForce {
		b.config.AMIForceDeregister = true
	}

	// Defaults
	if b.config.ChrootMounts == nil {
		b.config.ChrootMounts = make([][]string, 0)
	}

	if len(b.config.ChrootMounts) == 0 {
		b.config.ChrootMounts = [][]string{
			{"proc", "proc", "/proc"},
			{"sysfs", "sysfs", "/sys"},
			{"bind", "/dev", "/dev"},
			{"devpts", "devpts", "/dev/pts"},
			{"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"},
		}
	}

	// set default copy file if we're not giving our own
	if b.config.CopyFiles == nil {
		b.config.CopyFiles = make([]string, 0)
		if !b.config.FromScratch {
			b.config.CopyFiles = []string{"/etc/resolv.conf"}
		}
	}

	if b.config.CommandWrapper == "" {
		b.config.CommandWrapper = "{{.Command}}"
	}

	if b.config.MountPath == "" {
		b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
	}

	if b.config.MountPartition == "" {
		b.config.MountPartition = "1"
	}

	// Accumulate any errors or warnings
	var errs *packer.MultiError
	var warns []string

	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
	errs = packer.MultiErrorAppend(errs,
		b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)

	// Each chroot mount must be a [type, device, mountpoint] triple.
	for _, mounts := range b.config.ChrootMounts {
		if len(mounts) != 3 {
			errs = packer.MultiErrorAppend(
				errs, errors.New("Each chroot_mounts entry should be three elements."))
			break
		}
	}

	// from_scratch flips which fields are required vs. ignored.
	if b.config.FromScratch {
		if b.config.SourceAmi != "" || !b.config.SourceAmiFilter.Empty() {
			warns = append(warns, "source_ami and source_ami_filter are unused when from_scratch is true")
		}
		if b.config.RootVolumeSize == 0 {
			errs = packer.MultiErrorAppend(
				errs, errors.New("root_volume_size is required with from_scratch."))
		}
		if len(b.config.PreMountCommands) == 0 {
			errs = packer.MultiErrorAppend(
				errs, errors.New("pre_mount_commands is required with from_scratch."))
		}
		if b.config.AMIVirtType == "" {
			errs = packer.MultiErrorAppend(
				errs, errors.New("ami_virtualization_type is required with from_scratch."))
		}
		if b.config.RootDeviceName == "" {
			errs = packer.MultiErrorAppend(
				errs, errors.New("root_device_name is required with from_scratch."))
		}
		if len(b.config.AMIMappings) == 0 {
			errs = packer.MultiErrorAppend(
				errs, errors.New("ami_block_device_mappings is required with from_scratch."))
		}
	} else {
		if b.config.SourceAmi == "" && b.config.SourceAmiFilter.Empty() {
			errs = packer.MultiErrorAppend(
				errs, errors.New("source_ami or source_ami_filter is required."))
		}
		if len(b.config.AMIMappings) != 0 {
			warns = append(warns, "ami_block_device_mappings are unused when from_scratch is false")
		}
		if b.config.RootDeviceName != "" {
			warns = append(warns, "root_device_name is unused when from_scratch is false")
		}
	}

	if errs != nil && len(errs.Errors) > 0 {
		return warns, errs
	}

	// Register credentials with the secret filter BEFORE logging the
	// config so they are scrubbed from the log line below.
	packer.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
	log.Println(b.config)
	return warns, nil
}
|
||||||
|
|
||||||
|
// Run executes the chroot build: it validates the host OS, wires up the
// AWS session and state bag, assembles the multistep sequence, runs it,
// and wraps any produced AMIs in an awscommon.Artifact. Step order below
// is load-bearing (volume create -> attach -> mount -> provision -> snapshot).
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
	// Mounting and chrooting block devices only works on Linux hosts.
	if runtime.GOOS != "linux" {
		return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
	}

	session, err := b.config.Session()
	if err != nil {
		return nil, err
	}
	ec2conn := ec2.New(session)

	// Renders the configured command_wrapper template around a command;
	// passed to steps via the state bag as "wrappedCommand".
	wrappedCommand := func(command string) (string, error) {
		ctx := b.config.ctx
		ctx.Data = &wrappedCommandTemplate{Command: command}
		return interpolate.Render(b.config.CommandWrapper, &ctx)
	}

	// Setup the state bag and initial state for the steps
	state := new(multistep.BasicStateBag)
	state.Put("config", &b.config)
	state.Put("ec2", ec2conn)
	state.Put("awsSession", session)
	state.Put("hook", hook)
	state.Put("ui", ui)
	state.Put("wrappedCommand", CommandWrapper(wrappedCommand))

	// Build the steps
	steps := []multistep.Step{
		&awscommon.StepPreValidate{
			DestAmiName:     b.config.AMIName,
			ForceDeregister: b.config.AMIForceDeregister,
		},
		&StepInstanceInfo{},
	}

	// Source-AMI discovery only applies when not building from scratch.
	if !b.config.FromScratch {
		steps = append(steps,
			&awscommon.StepSourceAMIInfo{
				SourceAmi:                b.config.SourceAmi,
				EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
				EnableAMIENASupport:      b.config.AMIENASupport,
				AmiFilters:               b.config.SourceAmiFilter,
			},
			&StepCheckRootDevice{},
		)
	}

	steps = append(steps,
		&StepFlock{},
		&StepPrepareDevice{},
		&StepCreateVolume{
			RootVolumeSize: b.config.RootVolumeSize,
			RootVolumeTags: b.config.RootVolumeTags,
			Ctx:            b.config.ctx,
		},
		&StepAttachVolume{},
		&StepEarlyUnflock{},
		&StepPreMountCommands{
			Commands: b.config.PreMountCommands,
		},
		&StepMountDevice{
			MountOptions:   b.config.MountOptions,
			MountPartition: b.config.MountPartition,
		},
		&StepPostMountCommands{
			Commands: b.config.PostMountCommands,
		},
		&StepMountExtra{},
		&StepCopyFiles{},
		&StepChrootProvision{},
		&StepEarlyCleanup{},
		&StepSnapshot{},
		&awscommon.StepDeregisterAMI{
			AccessConfig:        &b.config.AccessConfig,
			ForceDeregister:     b.config.AMIForceDeregister,
			ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
			AMIName:             b.config.AMIName,
			Regions:             b.config.AMIRegions,
		},
		&StepRegisterAMI{
			RootVolumeSize:           b.config.RootVolumeSize,
			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
			EnableAMIENASupport:      b.config.AMIENASupport,
		},
		&awscommon.StepCreateEncryptedAMICopy{
			KeyID:             b.config.AMIKmsKeyId,
			EncryptBootVolume: b.config.AMIEncryptBootVolume,
			Name:              b.config.AMIName,
			AMIMappings:       b.config.AMIBlockDevices.AMIMappings,
		},
		&awscommon.StepAMIRegionCopy{
			AccessConfig:      &b.config.AccessConfig,
			Regions:           b.config.AMIRegions,
			RegionKeyIds:      b.config.AMIRegionKMSKeyIDs,
			EncryptBootVolume: b.config.AMIEncryptBootVolume,
			Name:              b.config.AMIName,
		},
		&awscommon.StepModifyAMIAttributes{
			Description:    b.config.AMIDescription,
			Users:          b.config.AMIUsers,
			Groups:         b.config.AMIGroups,
			ProductCodes:   b.config.AMIProductCodes,
			SnapshotUsers:  b.config.SnapshotUsers,
			SnapshotGroups: b.config.SnapshotGroups,
			Ctx:            b.config.ctx,
		},
		&awscommon.StepCreateTags{
			Tags:         b.config.AMITags,
			SnapshotTags: b.config.SnapshotTags,
			Ctx:          b.config.ctx,
		},
	)

	// Run!
	b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
	b.runner.Run(state)

	// If there was an error, return that
	if rawErr, ok := state.GetOk("error"); ok {
		return nil, rawErr.(error)
	}

	// If there are no AMIs, then just return
	if _, ok := state.GetOk("amis"); !ok {
		return nil, nil
	}

	// Build the artifact and return it
	artifact := &awscommon.Artifact{
		Amis:           state.Get("amis").(map[string]string),
		BuilderIdValue: BuilderId,
		Session:        session,
	}

	return artifact, nil
}
|
||||||
|
|
||||||
|
func (b *Builder) Cancel() {
|
||||||
|
if b.runner != nil {
|
||||||
|
log.Println("Cancelling the step runner...")
|
||||||
|
b.runner.Cancel()
|
||||||
|
}
|
||||||
|
}
|
161
builder/amazon/chroot/builder_test.go
Normal file
161
builder/amazon/chroot/builder_test.go
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testConfig returns a minimal valid configuration for the chroot builder.
func testConfig() map[string]interface{} {
	cfg := make(map[string]interface{}, 3)
	cfg["ami_name"] = "foo"
	cfg["source_ami"] = "foo"
	cfg["region"] = "us-east-1"
	return cfg
}
|
||||||
|
|
||||||
|
func TestBuilder_ImplementsBuilder(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = &Builder{}
|
||||||
|
if _, ok := raw.(packer.Builder); !ok {
|
||||||
|
t.Fatalf("Builder should be a builder")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_AMIName(t *testing.T) {
|
||||||
|
var b Builder
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
// Test good
|
||||||
|
config["ami_name"] = "foo"
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("should not have error: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test bad
|
||||||
|
config["ami_name"] = "foo {{"
|
||||||
|
b = Builder{}
|
||||||
|
warnings, err = b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test bad
|
||||||
|
delete(config, "ami_name")
|
||||||
|
b = Builder{}
|
||||||
|
warnings, err = b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_ChrootMounts(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
config["chroot_mounts"] = nil
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
config["chroot_mounts"] = [][]string{
|
||||||
|
{"bad"},
|
||||||
|
}
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestBuilderPrepare_SourceAmi(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
config["source_ami"] = ""
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("should have error")
|
||||||
|
}
|
||||||
|
|
||||||
|
config["source_ami"] = "foo"
|
||||||
|
warnings, err = b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_CommandWrapper(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
config["command_wrapper"] = "echo hi; {{.Command}}"
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_CopyFiles(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b.config.CopyFiles) != 1 && b.config.CopyFiles[0] != "/etc/resolv.conf" {
|
||||||
|
t.Errorf("Was expecting default value for copy_files.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) {
|
||||||
|
b := &Builder{}
|
||||||
|
config := testConfig()
|
||||||
|
|
||||||
|
config["copy_files"] = []string{}
|
||||||
|
warnings, err := b.Prepare(config)
|
||||||
|
if len(warnings) > 0 {
|
||||||
|
t.Fatalf("bad: %#v", warnings)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b.config.CopyFiles) > 0 {
|
||||||
|
t.Errorf("Was expecting no default value for copy_files.")
|
||||||
|
}
|
||||||
|
}
|
10
builder/amazon/chroot/cleanup.go
Normal file
10
builder/amazon/chroot/cleanup.go
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Cleanup is an interface that some steps implement for early cleanup.
type Cleanup interface {
	// CleanupFunc tears down whatever the step created; it may be invoked
	// before the normal multistep cleanup pass runs.
	CleanupFunc(multistep.StateBag) error
}
|
15
builder/amazon/chroot/command.go
Normal file
15
builder/amazon/chroot/command.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CommandWrapper is a type that given a command, will possibly modify that
// command in-flight. This might return an error.
type CommandWrapper func(string) (string, error)

// ShellCommand takes a command string and returns an *exec.Cmd to execute
// it within the context of a shell (/bin/sh).
func ShellCommand(command string) *exec.Cmd {
	const shell = "/bin/sh"
	return exec.Command(shell, "-c", command)
}
|
142
builder/amazon/chroot/communicator.go
Normal file
142
builder/amazon/chroot/communicator.go
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Communicator is a special communicator that works by executing
// commands locally but within a chroot.
type Communicator struct {
	// Chroot is the directory that commands are chrooted into and that
	// Upload/Download paths are resolved against.
	Chroot string
	// CmdWrapper rewrites every shell command (e.g. to add sudo) before
	// it is executed.
	CmdWrapper CommandWrapper
}
|
||||||
|
|
||||||
|
// Start runs cmd inside the chroot via `chroot <dir> /bin/sh -c <cmd>`,
// wiring up the remote command's stdio, and marks the command exited
// (with its real exit status when recoverable) from a background goroutine.
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
	// need extra escapes for the command since we're wrapping it in quotes
	cmd.Command = strconv.Quote(cmd.Command)
	command, err := c.CmdWrapper(
		fmt.Sprintf("chroot %s /bin/sh -c %s", c.Chroot, cmd.Command))
	if err != nil {
		return err
	}

	localCmd := ShellCommand(command)
	localCmd.Stdin = cmd.Stdin
	localCmd.Stdout = cmd.Stdout
	localCmd.Stderr = cmd.Stderr
	log.Printf("Executing: %s %#v", localCmd.Path, localCmd.Args)
	if err := localCmd.Start(); err != nil {
		return err
	}

	// Wait asynchronously so Start returns immediately, mirroring the
	// packer.Communicator contract.
	go func() {
		exitStatus := 0
		if err := localCmd.Wait(); err != nil {
			if exitErr, ok := err.(*exec.ExitError); ok {
				exitStatus = 1

				// There is no process-independent way to get the REAL
				// exit status so we just try to go deeper.
				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
					exitStatus = status.ExitStatus()
				}
			}
		}

		log.Printf(
			"Chroot execution exited with '%d': '%s'",
			exitStatus, cmd.Command)
		cmd.SetExited(exitStatus)
	}()

	return nil
}
|
||||||
|
|
||||||
|
func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
|
||||||
|
dst = filepath.Join(c.Chroot, dst)
|
||||||
|
log.Printf("Uploading to chroot dir: %s", dst)
|
||||||
|
tf, err := ioutil.TempFile("", "packer-amazon-chroot")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error preparing shell script: %s", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(tf.Name())
|
||||||
|
|
||||||
|
if _, err := io.Copy(tf, r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp %s %s", tf.Name(), dst))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ShellCommand(cpCmd).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Communicator) UploadDir(dst string, src string, exclude []string) error {
|
||||||
|
// If src ends with a trailing "/", copy from "src/." so that
|
||||||
|
// directory contents (including hidden files) are copied, but the
|
||||||
|
// directory "src" is omitted. BSD does this automatically when
|
||||||
|
// the source contains a trailing slash, but linux does not.
|
||||||
|
if src[len(src)-1] == '/' {
|
||||||
|
src = src + "."
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: remove any file copied if it appears in `exclude`
|
||||||
|
chrootDest := filepath.Join(c.Chroot, dst)
|
||||||
|
|
||||||
|
log.Printf("Uploading directory '%s' to '%s'", src, chrootDest)
|
||||||
|
cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp -R '%s' %s", src, chrootDest))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var stderr bytes.Buffer
|
||||||
|
cmd := ShellCommand(cpCmd)
|
||||||
|
cmd.Env = append(cmd.Env, "LANG=C")
|
||||||
|
cmd.Env = append(cmd.Env, os.Environ()...)
|
||||||
|
cmd.Stderr = &stderr
|
||||||
|
err = cmd.Run()
|
||||||
|
if err == nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(stderr.String(), "No such file") {
|
||||||
|
// This just means that the directory was empty. Just ignore it.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadDir is not supported by the chroot communicator; it always
// returns an error.
func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error {
	return fmt.Errorf("DownloadDir is not implemented for amazon-chroot")
}
|
||||||
|
|
||||||
|
func (c *Communicator) Download(src string, w io.Writer) error {
|
||||||
|
src = filepath.Join(c.Chroot, src)
|
||||||
|
log.Printf("Downloading from chroot dir: %s", src)
|
||||||
|
f, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if _, err := io.Copy(w, f); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
15
builder/amazon/chroot/communicator_test.go
Normal file
15
builder/amazon/chroot/communicator_test.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCommunicator_ImplementsCommunicator(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = &Communicator{}
|
||||||
|
if _, ok := raw.(packer.Communicator); !ok {
|
||||||
|
t.Fatalf("Communicator should be a communicator")
|
||||||
|
}
|
||||||
|
}
|
1
builder/amazon/chroot/copy_files.go
Normal file
1
builder/amazon/chroot/copy_files.go
Normal file
@ -0,0 +1 @@
|
|||||||
|
package chroot
|
49
builder/amazon/chroot/copy_files_test.go
Normal file
49
builder/amazon/chroot/copy_files_test.go
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCopyFile(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
first, err := ioutil.TempFile("", "copy_files_test")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("couldn't create temp file.")
|
||||||
|
}
|
||||||
|
defer os.Remove(first.Name())
|
||||||
|
newName := first.Name() + "-new"
|
||||||
|
|
||||||
|
payload := "copy_files_test.go payload"
|
||||||
|
if _, err = first.WriteString(payload); err != nil {
|
||||||
|
t.Fatalf("Couldn't write payload to first file.")
|
||||||
|
}
|
||||||
|
first.Sync()
|
||||||
|
|
||||||
|
cmd := ShellCommand(fmt.Sprintf("cp %s %s", first.Name(), newName))
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
t.Fatalf("Couldn't copy file")
|
||||||
|
}
|
||||||
|
defer os.Remove(newName)
|
||||||
|
|
||||||
|
second, err := os.Open(newName)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Couldn't open copied file.")
|
||||||
|
}
|
||||||
|
defer second.Close()
|
||||||
|
|
||||||
|
var copiedPayload = make([]byte, len(payload))
|
||||||
|
if _, err := second.Read(copiedPayload); err != nil {
|
||||||
|
t.Fatalf("Couldn't open copied file for reading.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(copiedPayload) != payload {
|
||||||
|
t.Fatalf("payload not copied.")
|
||||||
|
}
|
||||||
|
}
|
70
builder/amazon/chroot/device.go
Normal file
70
builder/amazon/chroot/device.go
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AvailableDevice finds an available device and returns it. Note that
// you should externally hold a flock or something in order to guarantee
// that this device is available across processes.
func AvailableDevice() (string, error) {
	prefix, err := devicePrefix()
	if err != nil {
		return "", err
	}

	letters := "fghijklmnop"
	for _, letter := range letters {
		device := fmt.Sprintf("/dev/%s%c", prefix, letter)

		// If the block device itself, i.e. /dev/sf, exists, then we
		// can't use any of the numbers either.
		if _, err := os.Stat(device); err == nil {
			continue
		}

		// To be able to build both Paravirtual and HVM images, the unnumbered
		// device and the first numbered one must be available.
		// E.g. /dev/xvdf and /dev/xvdf1
		numberedDevice := fmt.Sprintf("%s%d", device, 1)
		if _, err := os.Stat(numberedDevice); err != nil {
			return device, nil
		}
	}

	return "", errors.New("available device could not be found")
}

// devicePrefix returns the prefix ("sd" or "xvd" or so on) of the devices
// on the system by scanning the entries under /sys/block.
func devicePrefix() (string, error) {
	available := []string{"sd", "xvd"}

	f, err := os.Open("/sys/block")
	if err != nil {
		return "", err
	}
	defer f.Close()

	dirs, err := f.Readdirnames(-1)
	// Scan whatever names were returned even if Readdirnames also
	// reported an error part-way through; the error is surfaced below
	// only when no matching prefix was found.
	if len(dirs) > 0 {
		for _, dir := range dirs {
			dirBase := filepath.Base(dir)
			for _, prefix := range available {
				if strings.HasPrefix(dirBase, prefix) {
					return prefix, nil
				}
			}
		}
	}

	if err != nil {
		return "", err
	}

	return "", errors.New("device prefix could not be detected")
}
|
10
builder/amazon/chroot/device_test.go
Normal file
10
builder/amazon/chroot/device_test.go
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// TestDevicePrefixMatch is a placeholder for future coverage of
// NVMe-style device names (e.g. "nvme0n1"); the check below is
// intentionally left disabled.
func TestDevicePrefixMatch(t *testing.T) {
	/*
		if devicePrefixMatch("nvme0n1") != "" {
		}
	*/
}
|
16
builder/amazon/chroot/lockfile.go
Normal file
16
builder/amazon/chroot/lockfile.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lockFile always fails on Windows: the chroot builder's file locking
// is only implemented for Unix-like systems (see lockfile_unix.go).
func lockFile(*os.File) error {
	return errors.New("not supported on Windows")
}

// unlockFile mirrors the Unix signature but has nothing to release on
// Windows, so it always reports success.
func unlockFile(f *os.File) error {
	return nil
}
|
27
builder/amazon/chroot/lockfile_unix.go
Normal file
27
builder/amazon/chroot/lockfile_unix.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// See: http://linux.die.net/include/sys/file.h
|
||||||
|
const LOCK_EX = 2
|
||||||
|
const LOCK_NB = 4
|
||||||
|
const LOCK_UN = 8
|
||||||
|
|
||||||
|
func lockFile(f *os.File) error {
|
||||||
|
err := unix.Flock(int(f.Fd()), LOCK_EX)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unlockFile(f *os.File) error {
|
||||||
|
return unix.Flock(int(f.Fd()), LOCK_UN)
|
||||||
|
}
|
39
builder/amazon/chroot/run_local_commands.go
Normal file
39
builder/amazon/chroot/run_local_commands.go
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
sl "github.com/hashicorp/packer/common/shell-local"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx interpolate.Context, ui packer.Ui) error {
|
||||||
|
for _, rawCmd := range commands {
|
||||||
|
intCmd, err := interpolate.Render(rawCmd, &ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error interpolating: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
command, err := wrappedCommand(intCmd)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error wrapping command: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Say(fmt.Sprintf("Executing command: %s", command))
|
||||||
|
comm := &sl.Communicator{
|
||||||
|
ExecuteCommand: []string{"sh", "-c", command},
|
||||||
|
}
|
||||||
|
cmd := &packer.RemoteCmd{Command: command}
|
||||||
|
if err := cmd.StartWithUi(comm, ui); err != nil {
|
||||||
|
return fmt.Errorf("Error executing command: %s", err)
|
||||||
|
}
|
||||||
|
if cmd.ExitStatus != 0 {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"Received non-zero exit code %d from command: %s",
|
||||||
|
cmd.ExitStatus,
|
||||||
|
command)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
96
builder/amazon/chroot/step_attach_volume.go
Normal file
96
builder/amazon/chroot/step_attach_volume.go
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepAttachVolume attaches the previously created volume to an
|
||||||
|
// available device location.
|
||||||
|
//
|
||||||
|
// Produces:
|
||||||
|
// device string - The location where the volume was attached.
|
||||||
|
// attach_cleanup CleanupFunc
|
||||||
|
type StepAttachVolume struct {
|
||||||
|
attached bool
|
||||||
|
volumeId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepAttachVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
device := state.Get("device").(string)
|
||||||
|
instance := state.Get("instance").(*ec2.Instance)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
volumeId := state.Get("volume_id").(string)
|
||||||
|
|
||||||
|
// For the API call, it expects "sd" prefixed devices.
|
||||||
|
attachVolume := strings.Replace(device, "/xvd", "/sd", 1)
|
||||||
|
|
||||||
|
ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume))
|
||||||
|
_, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{
|
||||||
|
InstanceId: instance.InstanceId,
|
||||||
|
VolumeId: &volumeId,
|
||||||
|
Device: &attachVolume,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error attaching volume: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark that we attached it so we can detach it later
|
||||||
|
s.attached = true
|
||||||
|
s.volumeId = volumeId
|
||||||
|
|
||||||
|
// Wait for the volume to become attached
|
||||||
|
err = awscommon.WaitUntilVolumeAttached(ctx, ec2conn, s.volumeId)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error waiting for volume: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
state.Put("attach_cleanup", s)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepAttachVolume) Cleanup(state multistep.StateBag) {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
if err := s.CleanupFunc(state); err != nil {
|
||||||
|
ui.Error(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
|
||||||
|
if !s.attached {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
ui.Say("Detaching EBS volume...")
|
||||||
|
_, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeId: &s.volumeId})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error detaching EBS volume: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.attached = false
|
||||||
|
|
||||||
|
// Wait for the volume to detach
|
||||||
|
err = awscommon.WaitUntilVolumeDetached(aws.BackgroundContext(), ec2conn, s.volumeId)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error waiting for volume: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
11
builder/amazon/chroot/step_attach_volume_test.go
Normal file
11
builder/amazon/chroot/step_attach_volume_test.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestAttachVolumeCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = new(StepAttachVolume)
|
||||||
|
if _, ok := raw.(Cleanup); !ok {
|
||||||
|
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||||
|
}
|
||||||
|
}
|
32
builder/amazon/chroot/step_check_root_device.go
Normal file
32
builder/amazon/chroot/step_check_root_device.go
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
|
||||||
|
type StepCheckRootDevice struct{}
|
||||||
|
|
||||||
|
func (s *StepCheckRootDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
image := state.Get("source_image").(*ec2.Image)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
ui.Say("Checking the root device on source AMI...")
|
||||||
|
|
||||||
|
// It must be EBS-backed otherwise the build won't work
|
||||||
|
if *image.RootDeviceType != "ebs" {
|
||||||
|
err := fmt.Errorf("The root device of the source AMI must be EBS-backed.")
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCheckRootDevice) Cleanup(multistep.StateBag) {}
|
37
builder/amazon/chroot/step_chroot_provision.go
Normal file
37
builder/amazon/chroot/step_chroot_provision.go
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepChrootProvision provisions the instance within a chroot.
|
||||||
|
type StepChrootProvision struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepChrootProvision) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
hook := state.Get("hook").(packer.Hook)
|
||||||
|
mountPath := state.Get("mount_path").(string)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
|
||||||
|
// Create our communicator
|
||||||
|
comm := &Communicator{
|
||||||
|
Chroot: mountPath,
|
||||||
|
CmdWrapper: wrappedCommand,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provision
|
||||||
|
log.Println("Running the provision hook")
|
||||||
|
if err := hook.Run(packer.HookProvision, ui, comm, nil); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepChrootProvision) Cleanup(state multistep.StateBag) {}
|
91
builder/amazon/chroot/step_copy_files.go
Normal file
91
builder/amazon/chroot/step_copy_files.go
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepCopyFiles copies some files from the host into the chroot environment.
|
||||||
|
//
|
||||||
|
// Produces:
|
||||||
|
// copy_files_cleanup CleanupFunc - A function to clean up the copied files
|
||||||
|
// early.
|
||||||
|
type StepCopyFiles struct {
|
||||||
|
files []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCopyFiles) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
config := state.Get("config").(*Config)
|
||||||
|
mountPath := state.Get("mount_path").(string)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
stderr := new(bytes.Buffer)
|
||||||
|
|
||||||
|
s.files = make([]string, 0, len(config.CopyFiles))
|
||||||
|
if len(config.CopyFiles) > 0 {
|
||||||
|
ui.Say("Copying files from host to chroot...")
|
||||||
|
for _, path := range config.CopyFiles {
|
||||||
|
ui.Message(path)
|
||||||
|
chrootPath := filepath.Join(mountPath, path)
|
||||||
|
log.Printf("Copying '%s' to '%s'", path, chrootPath)
|
||||||
|
|
||||||
|
cmdText, err := wrappedCommand(fmt.Sprintf("cp --remove-destination %s %s", path, chrootPath))
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error building copy command: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
stderr.Reset()
|
||||||
|
cmd := ShellCommand(cmdText)
|
||||||
|
cmd.Stderr = stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
err := fmt.Errorf(
|
||||||
|
"Error copying file: %s\nnStderr: %s", err, stderr.String())
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
s.files = append(s.files, chrootPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
state.Put("copy_files_cleanup", s)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCopyFiles) Cleanup(state multistep.StateBag) {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
if err := s.CleanupFunc(state); err != nil {
|
||||||
|
ui.Error(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCopyFiles) CleanupFunc(state multistep.StateBag) error {
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
if s.files != nil {
|
||||||
|
for _, file := range s.files {
|
||||||
|
log.Printf("Removing: %s", file)
|
||||||
|
localCmdText, err := wrappedCommand(fmt.Sprintf("rm -f %s", file))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
localCmd := ShellCommand(localCmdText)
|
||||||
|
if err := localCmd.Run(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
s.files = nil
|
||||||
|
return nil
|
||||||
|
}
|
11
builder/amazon/chroot/step_copy_files_test.go
Normal file
11
builder/amazon/chroot/step_copy_files_test.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestCopyFilesCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = new(StepCopyFiles)
|
||||||
|
if _, ok := raw.(Cleanup); !ok {
|
||||||
|
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||||
|
}
|
||||||
|
}
|
139
builder/amazon/chroot/step_create_volume.go
Normal file
139
builder/amazon/chroot/step_create_volume.go
Normal file
@ -0,0 +1,139 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepCreateVolume creates a new volume from the snapshot of the root
|
||||||
|
// device of the AMI.
|
||||||
|
//
|
||||||
|
// Produces:
|
||||||
|
// volume_id string - The ID of the created volume
|
||||||
|
type StepCreateVolume struct {
|
||||||
|
volumeId string
|
||||||
|
RootVolumeSize int64
|
||||||
|
RootVolumeTags awscommon.TagMap
|
||||||
|
Ctx interpolate.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCreateVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
config := state.Get("config").(*Config)
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
instance := state.Get("instance").(*ec2.Instance)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
volTags, err := s.RootVolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error tagging volumes: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect tags for tagging on resource creation
|
||||||
|
var tagSpecs []*ec2.TagSpecification
|
||||||
|
|
||||||
|
if len(volTags) > 0 {
|
||||||
|
runVolTags := &ec2.TagSpecification{
|
||||||
|
ResourceType: aws.String("volume"),
|
||||||
|
Tags: volTags,
|
||||||
|
}
|
||||||
|
|
||||||
|
tagSpecs = append(tagSpecs, runVolTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
var createVolume *ec2.CreateVolumeInput
|
||||||
|
if config.FromScratch {
|
||||||
|
createVolume = &ec2.CreateVolumeInput{
|
||||||
|
AvailabilityZone: instance.Placement.AvailabilityZone,
|
||||||
|
Size: aws.Int64(s.RootVolumeSize),
|
||||||
|
VolumeType: aws.String(ec2.VolumeTypeGp2),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Determine the root device snapshot
|
||||||
|
image := state.Get("source_image").(*ec2.Image)
|
||||||
|
log.Printf("Searching for root device of the image (%s)", *image.RootDeviceName)
|
||||||
|
var rootDevice *ec2.BlockDeviceMapping
|
||||||
|
for _, device := range image.BlockDeviceMappings {
|
||||||
|
if *device.DeviceName == *image.RootDeviceName {
|
||||||
|
rootDevice = device
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rootDevice == nil {
|
||||||
|
err := fmt.Errorf("Couldn't find root device!")
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Say("Creating the root volume...")
|
||||||
|
vs := *rootDevice.Ebs.VolumeSize
|
||||||
|
if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
|
||||||
|
vs = s.RootVolumeSize
|
||||||
|
}
|
||||||
|
|
||||||
|
createVolume = &ec2.CreateVolumeInput{
|
||||||
|
AvailabilityZone: instance.Placement.AvailabilityZone,
|
||||||
|
Size: aws.Int64(vs),
|
||||||
|
SnapshotId: rootDevice.Ebs.SnapshotId,
|
||||||
|
VolumeType: rootDevice.Ebs.VolumeType,
|
||||||
|
Iops: rootDevice.Ebs.Iops,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tagSpecs) > 0 {
|
||||||
|
createVolume.SetTagSpecifications(tagSpecs)
|
||||||
|
volTags.Report(ui)
|
||||||
|
}
|
||||||
|
log.Printf("Create args: %+v", createVolume)
|
||||||
|
|
||||||
|
createVolumeResp, err := ec2conn.CreateVolume(createVolume)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating root volume: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the volume ID so we remember to delete it later
|
||||||
|
s.volumeId = *createVolumeResp.VolumeId
|
||||||
|
log.Printf("Volume ID: %s", s.volumeId)
|
||||||
|
|
||||||
|
// Wait for the volume to become ready
|
||||||
|
err = awscommon.WaitUntilVolumeAvailable(ctx, ec2conn, s.volumeId)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error waiting for volume: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
state.Put("volume_id", s.volumeId)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepCreateVolume) Cleanup(state multistep.StateBag) {
|
||||||
|
if s.volumeId == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
ui.Say("Deleting the created EBS volume...")
|
||||||
|
_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: &s.volumeId})
|
||||||
|
if err != nil {
|
||||||
|
ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err))
|
||||||
|
}
|
||||||
|
}
|
39
builder/amazon/chroot/step_early_cleanup.go
Normal file
39
builder/amazon/chroot/step_early_cleanup.go
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepEarlyCleanup performs some of the cleanup steps early in order to
|
||||||
|
// prepare for snapshotting and creating an AMI.
|
||||||
|
type StepEarlyCleanup struct{}
|
||||||
|
|
||||||
|
func (s *StepEarlyCleanup) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
cleanupKeys := []string{
|
||||||
|
"copy_files_cleanup",
|
||||||
|
"mount_extra_cleanup",
|
||||||
|
"mount_device_cleanup",
|
||||||
|
"attach_cleanup",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range cleanupKeys {
|
||||||
|
c := state.Get(key).(Cleanup)
|
||||||
|
log.Printf("Running cleanup func: %s", key)
|
||||||
|
if err := c.CleanupFunc(state); err != nil {
|
||||||
|
err := fmt.Errorf("Error cleaning up: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepEarlyCleanup) Cleanup(state multistep.StateBag) {}
|
30
builder/amazon/chroot/step_early_unflock.go
Normal file
30
builder/amazon/chroot/step_early_unflock.go
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepEarlyUnflock unlocks the flock.
|
||||||
|
type StepEarlyUnflock struct{}
|
||||||
|
|
||||||
|
func (s *StepEarlyUnflock) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
cleanup := state.Get("flock_cleanup").(Cleanup)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
log.Println("Unlocking file lock...")
|
||||||
|
if err := cleanup.CleanupFunc(state); err != nil {
|
||||||
|
err := fmt.Errorf("Error unlocking file lock: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepEarlyUnflock) Cleanup(state multistep.StateBag) {}
|
74
builder/amazon/chroot/step_flock.go
Normal file
74
builder/amazon/chroot/step_flock.go
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepFlock provisions the instance within a chroot.
|
||||||
|
//
|
||||||
|
// Produces:
|
||||||
|
// flock_cleanup Cleanup - To perform early cleanup
|
||||||
|
type StepFlock struct {
|
||||||
|
fh *os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepFlock) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
lockfile := "/var/lock/packer-chroot/lock"
|
||||||
|
if err := os.MkdirAll(filepath.Dir(lockfile), 0755); err != nil {
|
||||||
|
err := fmt.Errorf("Error creating lock: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Obtaining lock: %s", lockfile)
|
||||||
|
f, err := os.Create(lockfile)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating lock: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// LOCK!
|
||||||
|
if err := lockFile(f); err != nil {
|
||||||
|
err := fmt.Errorf("Error obtaining lock: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the file handle, we can't close it because we need to hold
|
||||||
|
// the lock.
|
||||||
|
s.fh = f
|
||||||
|
|
||||||
|
state.Put("flock_cleanup", s)
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepFlock) Cleanup(state multistep.StateBag) {
|
||||||
|
s.CleanupFunc(state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepFlock) CleanupFunc(state multistep.StateBag) error {
|
||||||
|
if s.fh == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Unlocking: %s", s.fh.Name())
|
||||||
|
if err := unlockFile(s.fh); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.fh = nil
|
||||||
|
return nil
|
||||||
|
}
|
11
builder/amazon/chroot/step_flock_test.go
Normal file
11
builder/amazon/chroot/step_flock_test.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestFlockCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = new(StepFlock)
|
||||||
|
if _, ok := raw.(Cleanup); !ok {
|
||||||
|
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||||
|
}
|
||||||
|
}
|
60
builder/amazon/chroot/step_instance_info.go
Normal file
60
builder/amazon/chroot/step_instance_info.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepInstanceInfo verifies that this builder is running on an EC2 instance.
|
||||||
|
type StepInstanceInfo struct{}
|
||||||
|
|
||||||
|
func (s *StepInstanceInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
session := state.Get("awsSession").(*session.Session)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
// Get our own instance ID
|
||||||
|
ui.Say("Gathering information about this EC2 instance...")
|
||||||
|
|
||||||
|
ec2meta := ec2metadata.New(session)
|
||||||
|
identity, err := ec2meta.GetInstanceIdentityDocument()
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf(
|
||||||
|
"Error retrieving the ID of the instance Packer is running on.\n" +
|
||||||
|
"Please verify Packer is running on a proper AWS EC2 instance.")
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
log.Printf("Instance ID: %s", identity.InstanceID)
|
||||||
|
|
||||||
|
// Query the entire instance metadata
|
||||||
|
instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&identity.InstanceID}})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error getting instance data: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(instancesResp.Reservations) == 0 {
|
||||||
|
err := fmt.Errorf("Error getting instance data: no instance found.")
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
instance := instancesResp.Reservations[0].Instances[0]
|
||||||
|
state.Put("instance", instance)
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepInstanceInfo) Cleanup(multistep.StateBag) {}
|
153
builder/amazon/chroot/step_mount_device.go
Normal file
153
builder/amazon/chroot/step_mount_device.go
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mountPathData struct {
|
||||||
|
Device string
|
||||||
|
}
|
||||||
|
|
||||||
|
// StepMountDevice mounts the attached device.
|
||||||
|
//
|
||||||
|
// Produces:
|
||||||
|
// mount_path string - The location where the volume was mounted.
|
||||||
|
// mount_device_cleanup CleanupFunc - To perform early cleanup
|
||||||
|
type StepMountDevice struct {
|
||||||
|
MountOptions []string
|
||||||
|
MountPartition string
|
||||||
|
|
||||||
|
mountPath string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
config := state.Get("config").(*Config)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
device := state.Get("device").(string)
|
||||||
|
if config.NVMEDevicePath != "" {
|
||||||
|
// customizable device path for mounting NVME block devices on c5 and m5 HVM
|
||||||
|
device = config.NVMEDevicePath
|
||||||
|
}
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
|
||||||
|
var virtualizationType string
|
||||||
|
if config.FromScratch {
|
||||||
|
virtualizationType = config.AMIVirtType
|
||||||
|
} else {
|
||||||
|
image := state.Get("source_image").(*ec2.Image)
|
||||||
|
virtualizationType = *image.VirtualizationType
|
||||||
|
log.Printf("Source image virtualization type is: %s", virtualizationType)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := config.ctx
|
||||||
|
|
||||||
|
ctx.Data = &mountPathData{Device: filepath.Base(device)}
|
||||||
|
mountPath, err := interpolate.Render(config.MountPath, &ctx)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error preparing mount directory: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
mountPath, err = filepath.Abs(mountPath)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error preparing mount directory: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Mount path: %s", mountPath)
|
||||||
|
|
||||||
|
if err := os.MkdirAll(mountPath, 0755); err != nil {
|
||||||
|
err := fmt.Errorf("Error creating mount directory: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceMount := device
|
||||||
|
|
||||||
|
if virtualizationType == "hvm" && s.MountPartition != "0" {
|
||||||
|
deviceMount = fmt.Sprintf("%s%s", device, s.MountPartition)
|
||||||
|
}
|
||||||
|
state.Put("deviceMount", deviceMount)
|
||||||
|
|
||||||
|
ui.Say("Mounting the root device...")
|
||||||
|
stderr := new(bytes.Buffer)
|
||||||
|
|
||||||
|
// build mount options from mount_options config, useful for nouuid options
|
||||||
|
// or other specific device type settings for mount
|
||||||
|
opts := ""
|
||||||
|
if len(s.MountOptions) > 0 {
|
||||||
|
opts = "-o " + strings.Join(s.MountOptions, " -o ")
|
||||||
|
}
|
||||||
|
mountCommand, err := wrappedCommand(
|
||||||
|
fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath))
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating mount command: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
|
||||||
|
cmd := ShellCommand(mountCommand)
|
||||||
|
cmd.Stderr = stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
err := fmt.Errorf(
|
||||||
|
"Error mounting root volume: %s\nStderr: %s", err, stderr.String())
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the mount path so we remember to unmount it later
|
||||||
|
s.mountPath = mountPath
|
||||||
|
state.Put("mount_path", s.mountPath)
|
||||||
|
state.Put("mount_device_cleanup", s)
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepMountDevice) Cleanup(state multistep.StateBag) {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
if err := s.CleanupFunc(state); err != nil {
|
||||||
|
ui.Error(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepMountDevice) CleanupFunc(state multistep.StateBag) error {
|
||||||
|
if s.mountPath == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
|
||||||
|
ui.Say("Unmounting the root device...")
|
||||||
|
unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", s.mountPath))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating unmount command: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := ShellCommand(unmountCommand)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return fmt.Errorf("Error unmounting root device: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mountPath = ""
|
||||||
|
return nil
|
||||||
|
}
|
11
builder/amazon/chroot/step_mount_device_test.go
Normal file
11
builder/amazon/chroot/step_mount_device_test.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestMountDeviceCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = new(StepMountDevice)
|
||||||
|
if _, ok := raw.(Cleanup); !ok {
|
||||||
|
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||||
|
}
|
||||||
|
}
|
137
builder/amazon/chroot/step_mount_extra.go
Normal file
137
builder/amazon/chroot/step_mount_extra.go
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepMountExtra mounts the additional paths configured in
// config.ChrootMounts inside the chroot (the original comment said
// "attached device", but Run only handles the extra chroot mounts).
//
// Produces:
//   mount_extra_cleanup CleanupFunc - To perform early cleanup
type StepMountExtra struct {
	// mounts records each successfully mounted inner path, in mount
	// order, so CleanupFunc can unmount them in reverse.
	mounts []string
}
|
||||||
|
|
||||||
|
// Run mounts each entry of config.ChrootMounts beneath the chroot's
// mount path, halting on the first failure. Successful mounts are
// recorded in s.mounts so they can be unmounted during cleanup.
//
// Each mountInfo entry is indexed as: [0] filesystem type (or "bind"),
// [1] mount source, [2] destination path relative to the chroot.
func (s *StepMountExtra) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	mountPath := state.Get("mount_path").(string)
	ui := state.Get("ui").(packer.Ui)
	wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)

	s.mounts = make([]string, 0, len(config.ChrootMounts))

	ui.Say("Mounting additional paths within the chroot...")
	for _, mountInfo := range config.ChrootMounts {
		innerPath := mountPath + mountInfo[2]

		if err := os.MkdirAll(innerPath, 0755); err != nil {
			err := fmt.Errorf("Error creating mount directory: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		// "bind" is not a real filesystem type; use mount's --bind flag
		// instead of -t.
		flags := "-t " + mountInfo[0]
		if mountInfo[0] == "bind" {
			flags = "--bind"
		}

		ui.Message(fmt.Sprintf("Mounting: %s", mountInfo[2]))
		stderr := new(bytes.Buffer)
		mountCommand, err := wrappedCommand(fmt.Sprintf(
			"mount %s %s %s",
			flags,
			mountInfo[1],
			innerPath))
		if err != nil {
			err := fmt.Errorf("Error creating mount command: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		cmd := ShellCommand(mountCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			err := fmt.Errorf(
				"Error mounting: %s\nStderr: %s", err, stderr.String())
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		// Record only after a successful mount so cleanup never tries
		// to unmount a path that was never mounted.
		s.mounts = append(s.mounts, innerPath)
	}

	state.Put("mount_extra_cleanup", s)
	return multistep.ActionContinue
}
|
||||||
|
|
||||||
|
func (s *StepMountExtra) Cleanup(state multistep.StateBag) {
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
if err := s.CleanupFunc(state); err != nil {
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanupFunc unmounts, in reverse mount order, every path recorded by
// Run. Paths no longer present in /proc/mounts are skipped. Idempotent:
// s.mounts is set to nil once everything has been unmounted.
func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error {
	if s.mounts == nil {
		return nil
	}

	wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
	for len(s.mounts) > 0 {
		var path string
		lastIndex := len(s.mounts) - 1
		// Pop the most recently mounted path first (LIFO) so nested
		// mounts are removed before their parents.
		path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex]

		grepCommand, err := wrappedCommand(fmt.Sprintf("grep %s /proc/mounts", path))
		if err != nil {
			return fmt.Errorf("Error creating grep command: %s", err)
		}

		// Before attempting to unmount,
		// check to see if path is already unmounted
		stderr := new(bytes.Buffer)
		cmd := ShellCommand(grepCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			if exitError, ok := err.(*exec.ExitError); ok {
				if status, ok := exitError.Sys().(syscall.WaitStatus); ok {
					exitStatus := status.ExitStatus()
					// grep exits with status 1 when no line matched,
					// i.e. the path is not in /proc/mounts.
					if exitStatus == 1 {
						// path has already been unmounted
						// just skip this path
						continue
					}
				}
			}
			// Any other grep failure falls through and we still attempt
			// the unmount below.
		}

		unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path))
		if err != nil {
			return fmt.Errorf("Error creating unmount command: %s", err)
		}

		stderr = new(bytes.Buffer)
		cmd = ShellCommand(unmountCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf(
				"Error unmounting device: %s\nStderr: %s", err, stderr.String())
		}
	}

	s.mounts = nil
	return nil
}
|
11
builder/amazon/chroot/step_mount_extra_test.go
Normal file
11
builder/amazon/chroot/step_mount_extra_test.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestMountExtraCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||||
|
var raw interface{}
|
||||||
|
raw = new(StepMountExtra)
|
||||||
|
if _, ok := raw.(Cleanup); !ok {
|
||||||
|
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||||
|
}
|
||||||
|
}
|
47
builder/amazon/chroot/step_post_mount_commands.go
Normal file
47
builder/amazon/chroot/step_post_mount_commands.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// postMountCommandsData is the template context made available to
// post-mount commands: the block device path and the chroot mount path.
type postMountCommandsData struct {
	Device    string
	MountPath string
}
|
||||||
|
|
||||||
|
// StepPostMountCommands allows running arbitrary commands after mounting the
// device, but prior to the bind mount and copy steps.
type StepPostMountCommands struct {
	// Commands are shell commands run in order; each may reference the
	// Device and MountPath template variables.
	Commands []string
}
|
||||||
|
|
||||||
|
func (s *StepPostMountCommands) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
config := state.Get("config").(*Config)
|
||||||
|
device := state.Get("device").(string)
|
||||||
|
mountPath := state.Get("mount_path").(string)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
|
||||||
|
if len(s.Commands) == 0 {
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := config.ctx
|
||||||
|
ctx.Data = &postMountCommandsData{
|
||||||
|
Device: device,
|
||||||
|
MountPath: mountPath,
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Say("Running post-mount commands...")
|
||||||
|
if err := RunLocalCommands(s.Commands, wrappedCommand, ctx, ui); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup is a no-op; post-mount commands require no teardown.
func (s *StepPostMountCommands) Cleanup(state multistep.StateBag) {}
|
41
builder/amazon/chroot/step_pre_mount_commands.go
Normal file
41
builder/amazon/chroot/step_pre_mount_commands.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// preMountCommandsData is the template context made available to
// pre-mount commands: the block device path.
type preMountCommandsData struct {
	Device string
}
|
||||||
|
|
||||||
|
// StepPreMountCommands sets up a new block device when building from scratch.
type StepPreMountCommands struct {
	// Commands are shell commands run in order before the device is
	// mounted; each may reference the Device template variable.
	Commands []string
}
|
||||||
|
|
||||||
|
func (s *StepPreMountCommands) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
config := state.Get("config").(*Config)
|
||||||
|
device := state.Get("device").(string)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||||
|
|
||||||
|
if len(s.Commands) == 0 {
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := config.ctx
|
||||||
|
ctx.Data = &preMountCommandsData{Device: device}
|
||||||
|
|
||||||
|
ui.Say("Running device setup commands...")
|
||||||
|
if err := RunLocalCommands(s.Commands, wrappedCommand, ctx, ui); err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup is a no-op; pre-mount commands require no teardown.
func (s *StepPreMountCommands) Cleanup(state multistep.StateBag) {}
|
46
builder/amazon/chroot/step_prepare_device.go
Normal file
46
builder/amazon/chroot/step_prepare_device.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepPrepareDevice finds an available device and sets it.
type StepPrepareDevice struct{}
|
||||||
|
|
||||||
|
// Run resolves the block device path to attach the volume to. When
// config.DevicePath is empty it searches for a free device name via
// AvailableDevice; either way it halts if the device node already
// exists on disk.
//
// Produces:
//   device string - the chosen device path
func (s *StepPrepareDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ui := state.Get("ui").(packer.Ui)

	device := config.DevicePath
	if device == "" {
		var err error
		log.Println("Device path not specified, searching for available device...")
		device, err = AvailableDevice()
		if err != nil {
			err := fmt.Errorf("Error finding available device: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	// os.Stat succeeding (err == nil) means the device node already
	// exists, so something else is presumably using it.
	if _, err := os.Stat(device); err == nil {
		err := fmt.Errorf("Device is in use: %s", device)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	log.Printf("Device: %s", device)
	state.Put("device", device)
	return multistep.ActionContinue
}
|
||||||
|
|
||||||
|
// Cleanup is a no-op; device selection requires no teardown.
func (s *StepPrepareDevice) Cleanup(state multistep.StateBag) {}
|
136
builder/amazon/chroot/step_register_ami.go
Normal file
136
builder/amazon/chroot/step_register_ami.go
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StepRegisterAMI creates the AMI.
type StepRegisterAMI struct {
	// RootVolumeSize (GiB) is applied to the root mapping always when
	// building from scratch, otherwise only when larger than the
	// source mapping's volume size.
	RootVolumeSize int64
	// EnableAMIENASupport registers the AMI with ENA support enabled.
	EnableAMIENASupport bool
	// EnableAMISriovNetSupport registers the AMI with SriovNetSupport
	// set to "simple".
	EnableAMISriovNetSupport bool
}
|
||||||
|
|
||||||
|
// Run registers the AMI from the snapshot produced by earlier steps and
// waits until it becomes available.
//
// It rewrites the block device mappings so the root device points at
// the new snapshot, optionally resizes the root volume, strips the
// Encrypted flag (invalid when registering from a snapshot), then calls
// RegisterImage and stores the resulting AMI ID under "amis".
func (s *StepRegisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ec2conn := state.Get("ec2").(*ec2.EC2)
	snapshotId := state.Get("snapshot_id").(string)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Registering the AMI...")

	var (
		registerOpts   *ec2.RegisterImageInput
		mappings       []*ec2.BlockDeviceMapping
		image          *ec2.Image
		rootDeviceName string
	)

	// From scratch: mappings come from the builder config. Otherwise
	// they are copied from the source image.
	if config.FromScratch {
		mappings = config.AMIBlockDevices.BuildAMIDevices()
		rootDeviceName = config.RootDeviceName
	} else {
		image = state.Get("source_image").(*ec2.Image)
		mappings = image.BlockDeviceMappings
		rootDeviceName = *image.RootDeviceName
	}

	newMappings := make([]*ec2.BlockDeviceMapping, len(mappings))
	for i, device := range mappings {
		newDevice := device
		if *newDevice.DeviceName == rootDeviceName {
			// Point the root mapping at the freshly created snapshot.
			if newDevice.Ebs != nil {
				newDevice.Ebs.SnapshotId = aws.String(snapshotId)
			} else {
				newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotId)}
			}

			// NOTE(review): when !FromScratch and the source mapping's
			// Ebs.VolumeSize is nil this dereference panics — presumably
			// EC2 always populates VolumeSize for snapshot-backed
			// mappings; confirm.
			if config.FromScratch || s.RootVolumeSize > *newDevice.Ebs.VolumeSize {
				newDevice.Ebs.VolumeSize = aws.Int64(s.RootVolumeSize)
			}
		}

		// assume working from a snapshot, so we unset the Encrypted field if set,
		// otherwise AWS API will return InvalidParameter
		if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
			newDevice.Ebs.Encrypted = nil
		}

		newMappings[i] = newDevice
	}

	if config.FromScratch {
		registerOpts = &ec2.RegisterImageInput{
			Name:                &config.AMIName,
			Architecture:        aws.String(ec2.ArchitectureValuesX8664),
			RootDeviceName:      aws.String(rootDeviceName),
			VirtualizationType:  aws.String(config.AMIVirtType),
			BlockDeviceMappings: newMappings,
		}
	} else {
		registerOpts = buildRegisterOpts(config, image, newMappings)
	}

	if s.EnableAMISriovNetSupport {
		// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
		// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
		registerOpts.SriovNetSupport = aws.String("simple")
	}
	if s.EnableAMIENASupport {
		// Set EnaSupport to true
		// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
		registerOpts.EnaSupport = aws.Bool(true)
	}

	registerResp, err := ec2conn.RegisterImage(registerOpts)
	if err != nil {
		state.Put("error", fmt.Errorf("Error registering AMI: %s", err))
		ui.Error(state.Get("error").(error).Error())
		return multistep.ActionHalt
	}

	// Set the AMI ID in the state
	ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
	amis := make(map[string]string)
	amis[*ec2conn.Config.Region] = *registerResp.ImageId
	state.Put("amis", amis)

	ui.Say("Waiting for AMI to become ready...")
	if err := awscommon.WaitUntilAMIAvailable(ctx, ec2conn, *registerResp.ImageId); err != nil {
		err := fmt.Errorf("Error waiting for AMI: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
|
||||||
|
|
||||||
|
// Cleanup is a no-op; the registered AMI is the build artifact and is
// intentionally left in place.
func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
|
||||||
|
|
||||||
|
func buildRegisterOpts(config *Config, image *ec2.Image, mappings []*ec2.BlockDeviceMapping) *ec2.RegisterImageInput {
|
||||||
|
registerOpts := &ec2.RegisterImageInput{
|
||||||
|
Name: &config.AMIName,
|
||||||
|
Architecture: image.Architecture,
|
||||||
|
RootDeviceName: image.RootDeviceName,
|
||||||
|
BlockDeviceMappings: mappings,
|
||||||
|
VirtualizationType: image.VirtualizationType,
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.AMIVirtType != "" {
|
||||||
|
registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.AMIVirtType != "hvm" {
|
||||||
|
registerOpts.KernelId = image.KernelId
|
||||||
|
registerOpts.RamdiskId = image.RamdiskId
|
||||||
|
}
|
||||||
|
return registerOpts
|
||||||
|
}
|
73
builder/amazon/chroot/step_register_ami_test.go
Normal file
73
builder/amazon/chroot/step_register_ami_test.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
package chroot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testImage() ec2.Image {
|
||||||
|
return ec2.Image{
|
||||||
|
ImageId: aws.String("ami-abcd1234"),
|
||||||
|
Name: aws.String("ami_test_name"),
|
||||||
|
Architecture: aws.String("x86_64"),
|
||||||
|
KernelId: aws.String("aki-abcd1234"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
|
||||||
|
config := Config{}
|
||||||
|
config.AMIName = "test_ami_name"
|
||||||
|
config.AMIDescription = "test_ami_description"
|
||||||
|
config.AMIVirtType = "paravirtual"
|
||||||
|
|
||||||
|
image := testImage()
|
||||||
|
|
||||||
|
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||||
|
|
||||||
|
opts := buildRegisterOpts(&config, &image, blockDevices)
|
||||||
|
|
||||||
|
expected := config.AMIVirtType
|
||||||
|
if *opts.VirtualizationType != expected {
|
||||||
|
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected = config.AMIName
|
||||||
|
if *opts.Name != expected {
|
||||||
|
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected = *image.KernelId
|
||||||
|
if *opts.KernelId != expected {
|
||||||
|
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
|
||||||
|
config := Config{}
|
||||||
|
config.AMIName = "test_ami_name"
|
||||||
|
config.AMIDescription = "test_ami_description"
|
||||||
|
config.AMIVirtType = "hvm"
|
||||||
|
|
||||||
|
image := testImage()
|
||||||
|
|
||||||
|
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||||
|
|
||||||
|
opts := buildRegisterOpts(&config, &image, blockDevices)
|
||||||
|
|
||||||
|
expected := config.AMIVirtType
|
||||||
|
if *opts.VirtualizationType != expected {
|
||||||
|
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected = config.AMIName
|
||||||
|
if *opts.Name != expected {
|
||||||
|
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.KernelId != nil {
|
||||||
|
t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId)
|
||||||
|
}
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user