Compare commits

10 commits   master ... separate_c

Author | SHA1 | Date
---|---|---
 | 0984f4698b |
 | c9fbb10c6e |
 | 203f9f764e |
 | 12e1831b56 |
 | fa65c04280 |
 | 6dcd87a779 |
 | 2eaaf7218b |
 | 67e856aaca |
 | 7be0cf428a |
 | bfee3b8f5b |
@@ -1,12 +1,13 @@
 orbs:
   win: circleci/windows@1.0.0
+  codecov: codecov/codecov@1.0.5

 version: 2.1

 executors:
   golang:
     docker:
-      - image: docker.mirror.hashicorp.services/circleci/golang:1.16
+      - image: docker.mirror.hashicorp.services/circleci/golang:1.15
     resource_class: medium+
   darwin:
     macos:
@@ -19,13 +20,10 @@ commands:
         type: string
       GOVERSION:
         type: string
-      HOME:
-        type: string
-        default: "~"
     steps:
       - checkout
-      - run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.<< parameters.GOOS >>-amd64.tar.gz | tar -C << parameters.HOME >>/ -xz
-      - run: << parameters.HOME >>/go/bin/go test ./... -coverprofile=coverage.txt -covermode=atomic
+      - run: curl https://dl.google.com/go/go<< parameters.GOVERSION >>.<< parameters.GOOS >>-amd64.tar.gz | tar -C ~/ -xz
+      - run: ~/go/bin/go test ./... -coverprofile=coverage.txt -covermode=atomic
   install-go-run-tests-windows:
     parameters:
       GOVERSION:
@@ -39,19 +37,15 @@ commands:
     parameters:
       GOOS:
         type: string
-      GOARCH:
-        default: "amd64"
-        type: string
     steps:
       - checkout
-      - run: GOOS=<< parameters.GOOS >> GOARCH=<<parameters.GOARCH>> go build -ldflags="-s -w -X github.com/hashicorp/packer/version.GitCommit=${CIRCLE_SHA1}" -o ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >> .
-      - run: zip ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>.zip ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>
-      - run: rm ./pkg/packer_<< parameters.GOOS >>_<< parameters.GOARCH >>
+      - run: GOOS=<< parameters.GOOS >> go build -ldflags="-s -w -X github.com/hashicorp/packer/version.GitCommit=${CIRCLE_SHA1}" -o ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH) .
+      - run: zip ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH).zip ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH)
+      - run: rm ./pkg/packer_<< parameters.GOOS >>_$(go env GOARCH)
       - persist_to_workspace:
           root: .
           paths:
            - ./pkg/

 # Golang CircleCI 2.0 configuration file
 #
 # Check https://circleci.com/docs/2.0/language-go/ for more details
@@ -63,20 +57,28 @@ jobs:
     steps:
       - checkout
       - run: TESTARGS="-coverprofile=coverage.txt -covermode=atomic" make ci
+      - codecov/upload:
+          file: coverage.txt
   test-darwin:
     executor: darwin
-    working_directory: ~/go/github.com/hashicorp/packer
+    working_directory: ~/go/src/github.com/hashicorp/packer
+    environment:
+      GO111MODULE: "off"
     steps:
       - install-go-run-tests-unix:
           GOOS: darwin
-          GOVERSION: "1.16"
+          GOVERSION: "1.15"
+      - codecov/upload:
+          file: coverage.txt
   test-windows:
     executor:
       name: win/vs2019
       shell: bash.exe
     steps:
       - install-go-run-tests-windows:
-          GOVERSION: "1.16"
+          GOVERSION: "1.15"
+      - codecov/upload:
+          file: coverage.txt
   check-lint:
     executor: golang
     resource_class: xlarge
@@ -86,6 +88,15 @@ jobs:
       - run:
           command: make ci-lint
          no_output_timeout: 30m
+  check-vendor-vs-mod:
+    executor: golang
+    working_directory: /go/src/github.com/hashicorp/packer
+    environment:
+      GO111MODULE: "off"
+    steps:
+      - checkout
+      - run: GO111MODULE=on go run . --help
+      - run: make check-vendor-vs-mod
   check-fmt:
     executor: golang
     steps:
@@ -114,13 +125,6 @@ jobs:
     steps:
       - build-and-persist-packer-binary:
           GOOS: darwin
-  build_darwin_arm64:
-    executor: golang
-    working_directory: /go/src/github.com/hashicorp/packer
-    steps:
-      - build-and-persist-packer-binary:
-          GOOS: darwin
-          GOARCH: arm64
   build_freebsd:
     executor: golang
     working_directory: /go/src/github.com/hashicorp/packer
@@ -194,13 +198,13 @@ workflows:
   check-code:
     jobs:
       - check-lint
+      - check-vendor-vs-mod
       - check-fmt
       - check-generate
   build_packer_binaries:
     jobs:
       - build_linux
       - build_darwin
-      - build_darwin_arm64
       - build_windows
       - build_freebsd
       - build_openbsd
@@ -209,7 +213,6 @@ workflows:
         requires:
           - build_linux
           - build_darwin
-          - build_darwin_arm64
           - build_windows
           - build_freebsd
           - build_openbsd
18  .codecov.yml  (new file)
@@ -0,0 +1,18 @@
+comment:
+  layout: "flags, files"
+  behavior: default
+  require_changes: true # only comment on changes in coverage
+  require_base: yes # [yes :: must have a base report to post]
+  require_head: yes # [yes :: must have a head report to post]
+  after_n_builds: 3 # wait for all OS test coverage builds to post comment
+  branches: # branch names that can post comment
+    - "master"
+
+coverage:
+  status:
+    project: off
+    patch: off
+
+ignore: # ignore hcl2spec generated code for coverage and mocks
+  - "**/*.hcl2spec.go"
+  - "**/*_mock.go"
1  .gitattributes  (vendored)
@@ -6,7 +6,6 @@
 *.mdx text eol=lf
 *.ps1 text eol=lf
 *.hcl text eol=lf
-*.tmpl text eol=lf
 *.txt text eol=lf
 go.mod text eol=lf
 go.sum text eol=lf
348  .github/CONTRIBUTING.md  (vendored)
@@ -11,12 +11,6 @@ contribute to the project, read on. This document will cover what we're looking
 for. By addressing all the points we're looking for, it raises the chances we
 can quickly merge or address your contributions.

-When contributing in any way to the Packer project (new issue, PR, etc), please
-be aware that our team identifies with many gender pronouns. Please remember to
-use nonbinary pronouns (they/them) and gender neutral language ("Hello folks")
-when addressing our team. For more reading on our code of conduct, please see the
-[HashiCorp community guidelines](https://www.hashicorp.com/community-guidelines).
-
 ## Issues

 ### Reporting an Issue
@@ -25,7 +19,7 @@ when addressing our team. For more reading on our code of conduct, please see th
 already fixed the bug you're experiencing.

 - Run the command with debug output with the environment variable `PACKER_LOG`.
-  For example: `PACKER_LOG=1 packer build template.pkr.hcl`. Take the _entire_
+  For example: `PACKER_LOG=1 packer build template.json`. Take the _entire_
   output and create a [gist](https://gist.github.com) for linking to in your
   issue. Packer should strip sensitive keys from the output, but take a look
   through just in case.
@@ -70,9 +64,7 @@ when addressing our team. For more reading on our code of conduct, please see th
 If you have never worked with Go before, you will have to install its
 runtime in order to build packer.

-1. This project always releases from the latest version of golang.
-   [Install go](https://golang.org/doc/install#install) To properly build from
-   source, you need to have golang >= v1.16
+1. This project always releases from the latest version of golang. [Install go](https://golang.org/doc/install#install)

 ## Setting up Packer for dev
@@ -80,6 +72,7 @@ If/when you have go installed you can already `go get` packer and `make` in
 order to compile and test Packer. These instructions target
 POSIX-like environments (macOS, Linux, Cygwin, etc.) so you may need to
 adjust them for Windows or other shells.
+The instructions below are for go 1.7 or later.

 1. Download the Packer source (and its dependencies) by running
    `go get github.com/hashicorp/packer`. This will download the Packer source to
@@ -98,7 +91,7 @@ adjust them for Windows or other shells.
 4. After building Packer successfully, use
    `$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
    verify your changes work. For instance:
-   `$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.pkr.hcl`.
+   `$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.

 5. If everything works well and the tests pass, run `go fmt` on your code before
    submitting a pull-request.
@@ -266,7 +259,7 @@ does not attempt to track the latest version for each dependency.
 #### Code generation

 Packer relies on `go generate` to generate a [peg parser for boot
-commands](https://github.com/hashicorp/packer/blob/master/packer-plugin-sdk/bootcommand/boot_command.go),
+commands](https://github.com/hashicorp/packer/blob/master/common/bootcommand/boot_command.go),
 [docs](https://github.com/hashicorp/packer/blob/master/website/pages/partials/builder/amazon/chroot/_Config-not-required.mdx)
 and HCL2's bridging code. Packer's testing suite will run `make check-generate`
 to check that all the generated files Packer needs are what they should be.
@@ -308,15 +301,15 @@ You can run tests for individual packages using commands like this:
 make test TEST=./builder/amazon/...
 ```

-#### Running Builder Acceptance Tests
+#### Running Acceptance Tests

 Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
 for various builders. These typically require an API key (AWS, GCE), or
 additional software to be installed on your computer (VirtualBox, VMware).

 If you're working on a new builder or builder feature and want to verify it is
-functioning (and also hasn't broken anything else), we recommend creating or
-running the acceptance tests.
+functioning (and also hasn't broken anything else), we recommend running the
+acceptance tests.

 **Warning:** The acceptance tests create/destroy/modify _real resources_, which
 may incur costs for real money. In the presence of a bug, it is possible that
@@ -347,7 +340,7 @@ Acceptance tests typically require other environment variables to be set for
 things such as API tokens and keys. Each test should error and tell you which
 credentials are missing, so those are not documented here.

-#### Running Provisioner Acceptance Tests
+#### Running Provisioners Acceptance Tests

 **Warning:** The acceptance tests create/destroy/modify _real resources_, which
 may incur costs for real money. In the presence of a bug, it is possible that
@@ -358,165 +351,125 @@ resources are not accidentally destroyed or overwritten during testing.
 Also, these typically require an API key (AWS, GCE), or additional software
 to be installed on your computer (VirtualBox, VMware).

-To run the Provisioners Acceptance Tests you should use the
-**ACC_TEST_BUILDERS** environment variable to tell the tests which builder the
-test should be run against.
+To run the Provisioners Acceptance Tests you should use both the **ACC_TEST_BUILDERS** and **ACC_TEST_PROVISIONERS** variables to
+tell which provisioner and builder the test should be run against.

 Examples of usage:

 - Run the Shell provisioner acceptance tests against the Amazon EBS builder.
   ```
-  ACC_TEST_BUILDERS=amazon-ebs go test ./provisioner/shell/... -v -timeout=1h
+  ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=shell go test ./provisioner/shell/... -v -timeout=1h
   ```
 - Do the same but using the Makefile
   ```
-  ACC_TEST_BUILDERS=amazon-ebs make provisioners-acctest TEST=./provisioner/shell
+  ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=shell make provisioners-acctest
   ```
-- Run all provisioner acceptance tests against the Amazon EBS builder.
+- Run all the Shell and Powershell provisioner acceptance tests against the Amazon EBS builder.
   ```
-  ACC_TEST_BUILDERS=amazon-ebs make provisioners-acctest TEST=./...
+  ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=shell,powershell make provisioners-acctest
   ```
-- Run all provisioner acceptance tests against all builders whenever they are compatible.
+- Run all the provisioner acceptance tests against the Amazon EBS builder.
   ```
-  ACC_TEST_BUILDERS=all make provisioners-acctest TEST=./...
+  ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=all make provisioners-acctest
   ```
+- Run all the provisioner acceptance tests against all builders whenever they are compatible.
+  ```
+  ACC_TEST_BUILDERS=all ACC_TEST_PROVISIONERS=all make provisioners-acctest
+  ```

-The **ACC_TEST_BUILDERS** env variable accepts a list of builders separated by
-commas. (e.g. `ACC_TEST_BUILDERS=amazon-ebs,virtualbox-iso`)
+Both **ACC_TEST_BUILDERS** and **ACC_TEST_PROVISIONERS** allow defining a list of builders and provisioners separated by commas
+(e.g. `ACC_TEST_BUILDERS=amazon-ebs,virtualbox-iso`)

 #### Writing Provisioner Acceptance Tests

-Packer has implemented a `ProvisionerTestCase` structure to help write
-provisioner acceptance tests.
+Packer has an already-implemented structure that will run the provisioner against builders; you can find it in `helper/tests/acc/provisioners.go`.
 All provisioners should use this structure in their acceptance tests.

+To start writing a new provisioner acceptance test, you should add a test file named `provisioner_acc_test.go` in the provisioner folder,
+and the package should be `<provisioner>_test`. This file should have a struct that implements the ProvisionerAcceptance interface.

 ```go
-type ProvisionerTestCase struct {
-  // Check is called after this step is executed in order to test that
-  // the step executed successfully. If this is not set, then the next
-  // step will be called
-  Check func(*exec.Cmd, string) error
-  // IsCompatible checks whether a provisioner is able to run against a
-  // given builder type and guest operating system, and returns a boolean.
-  // if it returns true, the test combination is okay to run. If false, the
-  // test combination is not okay to run.
-  IsCompatible func(builderType string, BuilderGuestOS string) bool
-  // Name is the name of the test case. Be simple but unique and descriptive.
-  Name string
-  // Setup, if non-nil, will be called once before the test case
-  // runs. This can be used for some setup like setting environment
-  // variables, or for validation prior to the
-  // test running. For example, you can use this to make sure certain
-  // binaries are installed, or text fixtures are in place.
-  Setup func() error
-  // Teardown will be called before the test case is over regardless
-  // of if the test succeeded or failed. This should return an error
-  // in the case that the test can't guarantee all resources were
-  // properly cleaned up.
-  Teardown builderT.TestTeardownFunc
-  // Template is the provisioner template to use.
-  // The provisioner template fragment must be a json-formatted string
-  // containing the provisioner definition but no other portions of a packer
-  // template. For
-  // example:
-  //
-  // ```json
-  // {
-  //  "type": "shell-local",
-  //  "inline", ["echo hello world"]
-  // }
-  //```
-  //
-  // is a valid entry for "template" here, but the complete Packer template:
-  //
-  // ```json
-  // {
-  //  "provisioners": [
-  //   {
-  //    "type": "shell-local",
-  //    "inline", ["echo hello world"]
-  //   }
-  //  ]
-  // }
-  // ```
-  //
-  // is invalid as input.
-  //
-  // You may provide multiple provisioners in the same template. For example:
-  // ```json
-  // {
-  //  "type": "shell-local",
-  //  "inline", ["echo hello world"]
-  // },
-  // {
-  //  "type": "shell-local",
-  //  "inline", ["echo hello world 2"]
-  // }
-  // ```
-  Template string
-  // Type is the type of provisioner.
-  Type string
+type ProvisionerAcceptance interface {
+  GetName() string
+  GetConfig() (string, error)
+  GetProvisionerStore() packer.MapOfProvisioner
+  IsCompatible(builder string, vmOS string) bool
+  RunTest(c *command.BuildCommand, args []string) error
 }

 ```
-To start writing a new provisioner acceptance test, you should add a test file
-named `provisioner_acc_test.go` in the same folder as your provisioner is
-defined. Create a test case by implementing the above struct, and run it
-by calling `provisioneracc.TestProvisionersAgainstBuilders(testCase, t)`
+- **GetName()** should return the provisioner type. For example, for the Shell provisioner the method returns "shell".

-The following example has been adapted from a shell-local provisioner test:
+- **GetConfig()** should read a text file with the json configuration block for the provisioner and any other necessary provisioner.
+  For the Shell one the file contains:

 ```
-import (
-  "github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc"
-  "github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
-)
-
-// ...
-
-func TestAccShellProvisioner_basic(t *testing.T) {
-  // Create a json template fragment containing just the provisioners you want
-  // to run.
-  templateString := `{
-    "type": "shell-local",
-    "script": "test-fixtures/script.sh",
-    "max_retries" : 5
-  }`
-
-  // instantiate a test case.
-  testCase := &provisioneracc.ProvisionerTestCase{
-    IsCompatible: func() bool {return true},
-    Name:         "shell-local-provisioner-basic",
-    Teardown: func() error {
-      testutils.CleanupFiles("test-fixtures/file.txt")
-      return nil
+{
+  "type": "shell",
+  "inline": [
+    "echo {{ build `ID`}} > provisioner.{{ build `PackerRunUUID`}}.txt"
+  ]
+},
-    Template: templateString,
-    Type:     "shell-local",
-    Check: func(buildcommand *exec.Cmd, logfile string) error {
-      if buildcommand.ProcessState != nil {
-        if buildcommand.ProcessState.ExitCode() != 0 {
-          return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
+{
+  "type": "file",
+  "source": "provisioner.{{ build `PackerRunUUID`}}.txt",
+  "destination": "provisioner.shell.{{ build `PackerRunUUID`}}.txt",
+  "direction": "download"
+}
 ```
+The file should be placed under the `test-fixtures` folder.
+In this case, it's necessary to use the File provisioner to validate whether the Shell provisioner test succeeded or not.
+This config should be returned as a string that will later be merged with the builder config into a full template.

+- **GetProvisionerStore()** returns the provisioner store where we declare the available provisioners for running the build.
+  For the Shell provisioner this is:
+```go
+func (s *ShellProvisionerAccTest) GetProvisionerStore() packer.MapOfProvisioner {
+  return packer.MapOfProvisioner{
+    "shell": func() (packer.Provisioner, error) { return &shell.Provisioner{}, nil },
+    "file":  func() (packer.Provisioner, error) { return &file.Provisioner{}, nil },
+  }
+}
-      filecontents, err := loadFile("file.txt")
-      if err != nil {
-        return err
 ```

+- **IsCompatible(builder string, vmOS string)** returns true or false depending on whether the provisioner should run against a
+  specific builder and/or specific OS.

+- **RunTest(c *command.BuildCommand, args []string)** actually runs the build and returns an error if it fails the validations.
+  For the Shell provisioner this is:
+```go
+func (s *ShellProvisionerAccTest) RunTest(c *command.BuildCommand, args []string) error {
+  // Provisioner specific setup
+  UUID := os.Getenv("PACKER_RUN_UUID")
+  if UUID == "" {
+    UUID, _ = uuid.GenerateUUID()
+    os.Setenv("PACKER_RUN_UUID", UUID)
+  }
-      if !strings.Contains(filecontents, "hello") {
-        return fmt.Errorf("file contents were wrong: %s", filecontents)
+  file := "provisioner.shell." + UUID + ".txt"
+  defer testshelper.CleanupFiles(file)

+  // Run build
+  // All provisioner acc tests should contain this code and validation
+  if code := c.Run(args); code != 0 {
+    ui := c.Meta.Ui.(*packer.BasicUi)
+    out := ui.Writer.(*bytes.Buffer)
+    err := ui.ErrorWriter.(*bytes.Buffer)
+    return fmt.Errorf(
+      "Bad exit code.\n\nStdout:\n\n%s\n\nStderr:\n\n%s",
+      out.String(),
+      err.String())
   }

+  // Any other extra specific validation
+  if !testshelper.FileExists(file) {
+    return fmt.Errorf("Expected to find %s", file)
+  }
   return nil
-    },
 }

-  provisioneracc.TestProvisionersAgainstBuilders(testCase, t)
-}

 ```

+After writing the struct and implementing the interface, now it is time to write the test that will run all
+of this code you wrote. Your test should be like:

@@ -527,101 +480,64 @@ func TestShellProvisioner(t *testing.T) {
 }
 ```

-The method `TestProvisionersAgainstBuilders` will run the provisioner against
-all available and compatible builders. If there are no builders compatible with
-the test you want to run, you can add a builder using the following steps:
+If the environment variable **ACC_TEST_PROVISIONERS** is set to `all` or contains the provisioner type, then the test should run; otherwise the test should be skipped.
+To run it, you'll need to call the helper function `acc.TestProvisionersAgainstBuilders`, passing a pointer to the test struct created above and the testing pointer.

-Create a subdirectory in provisioneracc/test-fixtures for the type of builder
-you are adding. In this subdirectory, add one json file containing a single
-builder fragment. For example, one of our amazon-ebs builders is defined in
-provisioneracc/test-fixtures/amazon-ebs/amazon-ebs.txt and contains:
+The method `TestProvisionersAgainstBuilders` will run the provisioner against all available and compatible builders. An available builder
+is one that has the necessary code for running this type of test. In case the builder you want to run against is not available for testing, you can write it following the next steps.

-```json
+To add a new builder to the available builders for provisioner acceptance testing, you'll need to create a new folder under the builder folder
+called `acceptance`, and inside it create the `builder_acceptance.go` file; the package should be `<builder>_acc`. Like the provisioners, you'll need to create a struct that
+implements the BuilderAcceptance interface.
+```go
+type BuilderAcceptance interface {
+  GetConfigs() (map[string]string, error)
+  GetBuilderStore() packer.MapOfBuilder
+  CleanUp() error
+}
+```
+- **GetConfigs()** should read a text file with the json configuration block for the builder and return a map of configs by OS type.
+  For the Amazon EBS builder the file contains:
+```
 {
   "type": "amazon-ebs",
   "ami_name": "packer-acc-test",
-  "instance_type": "t2.micro",
+  "instance_type": "m1.small",
   "region": "us-east-1",
   "ssh_username": "ubuntu",
-  "source_ami_filter": {
-    "filters": {
-      "virtualization-type": "hvm",
-      "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
-      "root-device-type": "ebs"
-    },
-    "owners": ["099720109477"],
-    "most_recent": true
-  },
+  "source_ami": "ami-0568456c",
   "force_deregister" : true,
   "tags": {
     "packer-test": "true"
   }
 }
 ```
+The file should be placed under the `test-fixtures` folder.
+In case you need to make references to another file, you'll need to add the path relative to the provisioners folder, like:
+`../../builder/amazon/ebs/acceptance/test-fixtures/file.txt`.

-note that this fragment does not contain anything other than a single builder
-definition. The testing framework will combine this with the provisioner
-fragment to create a working json template.

-In order to tell the testing framework how to use this builder fragment, you
-need to implement a `BuilderFixture` struct:

+- **GetBuilderStore()** returns the builder store where we declare the available builders for running the build.
+  For the Amazon EBS builder this is:
 ```go
-type BuilderFixture struct {
-  // Name is the name of the builder fixture.
-  // Be simple and descriptive.
-  Name string
-  // Setup creates necessary extra test fixtures, and renders their values
-  // into the BuilderFixture.Template.
-  Setup func()
-  // Template is the path to a builder template fragment.
-  // The builder template fragment must be a json-formatted file containing
-  // the builder definition but no other portions of a packer template. For
-  // example:
-  //
-  // ```json
-  // {
-  //  "type": "null",
-  //  "communicator", "none"
-  // }
-  //```
-  //
-  // is a valid entry for "template" here, but the complete Packer template:
-  //
-  // ```json
-  // {
-  //  "builders": [
-  //   "type": "null",
-  //   "communicator": "none"
-  //  ]
-  // }
-  // ```
-  //
-  // is invalid as input.
-  //
-  // Only provide one builder template fragment per file.
-  TemplatePath string
-
-  // GuestOS says what guest os type the builder template fragment creates.
-  // Valid values are "windows", "linux" or "darwin" guests.
-  GuestOS string
-
-  // HostOS says what host os type the builder is capable of running on.
-  // Valid values are "any", "windows", or "posix". If you set "posix", then
-  // this builder can run on a "linux" or "darwin" platform. If you set
-  // "any", then this builder can be used on any platform.
-  HostOS string
-
-  Teardown builderT.TestTeardownFunc
+func (s *AmazonEBSAccTest) GetBuilderStore() packer.MapOfBuilder {
+  return packer.MapOfBuilder{
+    "amazon-ebs": func() (packer.Builder, error) { return &amazonebsbuilder.Builder{}, nil },
+  }
 }
 ```
-Implement this struct in the file "provisioneracc/builders.go", then add
-the new implementation to the `BuildersAccTest` map in
-`provisioneracc/provisioners.go`
-
-Once you finish these steps, you should be ready to run your new provisioner
-acceptance test by setting the name used in the BuildersAccTest map as your
-`ACC_TEST_BUILDERS` environment variable.
+- **CleanUp()** cleans up any resource created by the builder, whether local or remote.

+Once you have created the necessary builder code, the last step is adding it to the `BuildersAccTest` map in `helper/tests/acc/provisioners.go`.
+```go
+var BuildersAccTest = map[string]BuilderAcceptance{
+  ...
+  "amazon-ebs": new(amazonEBS.AmazonEBSAccTest),
+  ...
+}
+```

+Once you finish the steps, you should be ready to run your new provisioner acceptance test.
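To make the gating described above concrete, here is a minimal, self-contained sketch of how a provisioner acceptance test can gate itself on **ACC_TEST_PROVISIONERS**. It is illustrative only: the `shouldRun` helper is an assumption of this sketch rather than code from the Packer tree, and the Packer-specific methods (`GetConfig()`, `GetProvisionerStore()`, `RunTest()`) are elided because the diff above shows them in full.

```go
package shell_test

import (
	"os"
	"strings"
	"testing"
)

// ShellProvisionerAccTest sketches the two ProvisionerAcceptance methods
// that need no Packer internals.
type ShellProvisionerAccTest struct{}

// GetName returns the provisioner type, as described above.
func (s *ShellProvisionerAccTest) GetName() string { return "shell" }

// IsCompatible reports whether this provisioner should run against the
// given builder and guest OS; the shell provisioner runs everywhere.
func (s *ShellProvisionerAccTest) IsCompatible(builder string, vmOS string) bool {
	return true
}

// shouldRun implements the ACC_TEST_PROVISIONERS gating described above:
// run when the variable is "all" or contains this provisioner's type.
func shouldRun(provisionerType string) bool {
	v := os.Getenv("ACC_TEST_PROVISIONERS")
	if v == "all" {
		return true
	}
	for _, p := range strings.Split(v, ",") {
		if p == provisionerType {
			return true
		}
	}
	return false
}

func TestShellProvisionerGate(t *testing.T) {
	test := &ShellProvisionerAccTest{}
	if !shouldRun(test.GetName()) {
		t.Skip("set ACC_TEST_PROVISIONERS=shell (or all) to run")
	}
	// A real test would now call acc.TestProvisionersAgainstBuilders(test, t).
}
```

This pairs with the usage examples earlier in the file, e.g. `ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=shell go test ./provisioner/shell/...`.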

 #### Debugging Plugins
2  .github/PULL_REQUEST_TEMPLATE.md  (vendored)
@@ -10,7 +10,7 @@ Describe the change you are making here!

 Please include tests. Check out these examples:

-- https://github.com/hashicorp/packer/blob/master/builder/parallels/common/ssh_config_test.go#L34
+- https://github.com/hashicorp/packer/blob/master/builder/virtualbox/common/ssh_config_test.go#L19-L37
 - https://github.com/hashicorp/packer/blob/master/post-processor/compress/post-processor_test.go#L153-L182

 If your PR resolves any open issue(s), please indicate them like this so they will be closed when your PR is merged:
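The linked examples are table-driven Go unit tests. For contributors unfamiliar with that shape, here is a minimal, hypothetical illustration; the `clampRetries` helper is invented for this sketch and is not taken from the linked files:

```go
package example_test

import "testing"

// clampRetries is a stand-in for the kind of small config-normalizing
// function the linked tests exercise.
func clampRetries(n int) int {
	if n < 0 {
		return 0
	}
	return n
}

func TestClampRetries(t *testing.T) {
	cases := []struct {
		name string
		in   int
		want int
	}{
		{"negative clamped to zero", -5, 0},
		{"zero unchanged", 0, 0},
		{"positive passed through", 3, 3},
	}
	for _, tc := range cases {
		if got := clampRetries(tc.in); got != tc.want {
			t.Errorf("%s: clampRetries(%d) = %d, want %d", tc.name, tc.in, got, tc.want)
		}
	}
}
```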
5  .github/labeler-issue-triage.yml  (vendored)
@@ -1,5 +0,0 @@
-bug:
-  - 'panic:'
-crash:
-  - 'panic:'
102  .github/workflows/check-plugin-docs.js  (vendored)
@@ -1,102 +0,0 @@
-const fs = require("fs");
-const path = require("path");
-const fetchPluginDocs = require("../../website/components/remote-plugin-docs/utils/fetch-plugin-docs");
-
-const COLOR_RESET = "\x1b[0m";
-const COLOR_GREEN = "\x1b[32m";
-const COLOR_BLUE = "\x1b[34m";
-const COLOR_RED = "\x1b[31m";
-
-async function checkPluginDocs() {
-  const failureMessages = [];
-  const pluginsPath = "website/data/docs-remote-plugins.json";
-  const pluginsFile = fs.readFileSync(path.join(process.cwd(), pluginsPath));
-  const pluginEntries = JSON.parse(pluginsFile);
-  const entriesCount = pluginEntries.length;
-  console.log(`\nResolving plugin docs from ${entriesCount} repositories …`);
-  for (var i = 0; i < entriesCount; i++) {
-    const pluginEntry = pluginEntries[i];
-    const { title, repo, version } = pluginEntry;
-    console.log(`\n${COLOR_BLUE}${repo}${COLOR_RESET} | ${title}`);
-    console.log(`Fetching docs from release "${version}" …`);
-    try {
-      // Validate that all required properties are present
-      const undefinedProps = ["title", "repo", "version", "path"].filter(
-        (key) => typeof pluginEntry[key] == "undefined"
-      );
-      if (undefinedProps.length > 0) {
-        throw new Error(
-          `Failed to validate plugin docs config. Undefined configuration properties ${JSON.stringify(
-            undefinedProps
-          )} found for "${
-            title || pluginEntry.path || repo
-          }". In "website/data/docs-remote-plugins.json", please ensure the missing properties ${JSON.stringify(
-            undefinedProps
-          )} are defined. Additional information on this configuration can be found in "website/README.md".`
-        );
-      }
-      // Validate pluginTier property
-      const { pluginTier } = pluginEntry;
-      if (typeof pluginTier !== "undefined") {
-        const validPluginTiers = ["official", "community"];
-        const isValid = validPluginTiers.indexOf(pluginTier) !== -1;
-        if (!isValid) {
-          throw new Error(
-            `Failed to validate plugin docs config. Invalid pluginTier "${pluginTier}" found for "${
-              title || pluginEntry.path || repo
-            }". In "website/data/docs-remote-plugins.json", the optional pluginTier property must be one of ${JSON.stringify(
-              validPluginTiers
-            )}. The pluginTier property can also be omitted, in which case it will be determined from the plugin repository owner.`
-          );
-        }
-      }
-      // Validate that local zip files are not used in production
-      if (typeof pluginEntry.zipFile !== "undefined") {
-        throw new Error(
-          `Local ZIP file being used for "${
-            title || pluginEntry.path || repo
-          }". The zipFile option should only be used for local development. Please omit the zipFile attribute and ensure the plugin entry points to a remote repository.`
-        );
-      }
-      // Attempt to fetch plugin docs files
-      const docsMdxFiles = await fetchPluginDocs({ repo, tag: version });
-      const mdxFilesByComponent = docsMdxFiles.reduce((acc, mdxFile) => {
-        const componentType = mdxFile.filePath.split("/")[1];
-        if (!acc[componentType]) acc[componentType] = [];
-        acc[componentType].push(mdxFile);
-        return acc;
-      }, {});
-      console.log(`${COLOR_GREEN}Found valid docs:${COLOR_RESET}`);
-      Object.keys(mdxFilesByComponent).forEach((component) => {
-        const componentFiles = mdxFilesByComponent[component];
-        console.log(`  ${component}`);
-        componentFiles.forEach(({ filePath }) => {
-          const pathFromComponent = filePath.split("/").slice(2).join("/");
-          console.log(`    ├── ${pathFromComponent}`);
-        });
-      });
-    } catch (err) {
-      console.log(`${COLOR_RED}${err}${COLOR_RESET}`);
-      failureMessages.push(`\n${COLOR_RED}× ${repo}: ${COLOR_RESET}${err}`);
-    }
-  }
-
-  if (failureMessages.length === 0) {
-    console.log(
-      `\n---\n\n${COLOR_GREEN}Summary: Successfully resolved all plugin docs.`
-    );
-    pluginEntries.forEach((e) =>
-      console.log(`${COLOR_GREEN}✓ ${e.repo}${COLOR_RESET}`)
-    );
-    console.log("");
-  } else {
-    console.log(
-      `\n---\n\n${COLOR_RED}Summary: Failed to fetch docs for ${failureMessages.length} plugin(s):`
-    );
-    failureMessages.forEach((err) => console.log(err));
-    console.log("");
-    process.exit(1);
-  }
-}
-
-checkPluginDocs();
29  .github/workflows/check-plugin-docs.yml  (vendored)
@@ -1,29 +0,0 @@
-#
-# This GitHub action checks plugin repositories for valid docs.
-#
-# This provides a quick assessment on PRs of whether
-# there might be issues with docs in plugin repositories.
-#
-# This is intended to help debug Vercel build issues, which
-# may or may not be related to docs in plugin repositories.
-
-name: "website: Check plugin docs"
-on:
-  pull_request:
-    paths:
-      - "website/**"
-  schedule:
-    - cron: "45 0 * * *"
-
-jobs:
-  check-plugin-docs:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup Node
-        uses: actions/setup-node@v1
-      - name: Install Dependencies
-        run: npm i isomorphic-unfetch adm-zip gray-matter
-      - name: Fetch and validate plugin docs
-        run: node .github/workflows/check-plugin-docs.js
17  .github/workflows/issue-comment-created.yml  (vendored)
@@ -1,17 +0,0 @@
-name: Issue Comment Created Triage
-
-on:
-  issue_comment:
-    types: [created]
-
-jobs:
-  issue_comment_triage:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-ecosystem/action-remove-labels@v1
-        with:
-          github_token: "${{ secrets.GITHUB_TOKEN }}"
-          labels: |
-            stale
-            waiting-reply
16  .github/workflows/issues-opened.yml  (vendored)
@@ -1,16 +0,0 @@
-name: Issue Opened Triage
-
-on:
-  issues:
-    types: [opened]
-
-jobs:
-  issue_triage:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: github/issue-labeler@v2
-        with:
-          repo-token: "${{ secrets.GITHUB_TOKEN }}"
-          configuration-path: .github/labeler-issue-triage.yml
-
37  .github/workflows/linkchecker.yml  (vendored)
@@ -1,37 +0,0 @@
-on:
-  pull_request:
-    paths:
-      - 'website/**'
-
-name: Check markdown links on modified website files
-jobs:
-  vercel-deployment-poll:
-    runs-on: ubuntu-latest
-    timeout-minutes: 5 #cancel job if no deployment is found within x minutes
-    outputs:
-      url: ${{ steps.waitForVercelPreviewDeployment.outputs.url }}
-    steps:
-      - name: Wait for Vercel preview deployment to be ready
-        uses: nywilken/wait-for-vercel-preview@master
-        id: waitForVercelPreviewDeployment
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          max_timeout: 600 # in seconds, set really high to leverage job timeout-minutes values
-          allow_inactive: true # needed to ensure we get a URL for a previously released deployment
-  markdown-link-check:
-    needs: vercel-deployment-poll
-    if: ${{ needs.vercel-deployment-poll.outputs.url != '' }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Get Deployment URL
-        run:
-          echo "DEPLOYMENT_URL=${{ needs.vercel-deployment-poll.outputs.url }}" >> $GITHUB_ENV
-      - name: Checkout source branch
-        uses: actions/checkout@master
-      - name: Check links
-        uses: gaurav-nelson/github-action-markdown-link-check@v1
-        with:
-          use-quiet-mode: 'yes'
-          file-extension: 'mdx'
-          check-modified-files-only: 'yes'
-          folder-path: 'website/content'
29  .github/workflows/lock.yml  (vendored)
@@ -1,29 +0,0 @@
-name: 'Lock Threads'
-
-on:
-  schedule:
-    - cron: '50 1 * * *'
-
-# Only 50 issues will be handled during a given run.
-jobs:
-  lock:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: dessant/lock-threads@v2
-        with:
-          github-token: ${{ github.token }}
-          issue-lock-comment: >
-            I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
-
-            If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
-          issue-lock-inactive-days: '30'
-          # Issues older than 180 days ago should be ignored
-          issue-exclude-created-before: '2020-11-01'
-          pr-lock-comment: >
-            I'm going to lock this pull request because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
-
-            If you have found a problem that seems related to this change, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
-          pr-lock-inactive-days: '30'
-          # Issues older than 180 days ago should be ignored
-          pr-exclude-created-before: '2020-11-01'
-
17  .github/workflows/scheduled-link-checker.yml  (vendored)
@@ -1,17 +0,0 @@
-on:
-  schedule:
-    - cron: "45 0 * * *"
-name: Check Markdown links on main branch
-jobs:
-  markdown-link-check:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Set deployment URL env
-        run:
-          echo "DEPLOYMENT_URL=https://packer-git-master.hashicorp.vercel.app" >> $GITHUB_ENV
-      - uses: actions/checkout@master
-      - uses: gaurav-nelson/github-action-markdown-link-check@v1
-        with:
-          use-quiet-mode: 'yes'
-          file-extension: 'mdx'
-          folder-path: 'website/content'
2  .gitignore  (vendored)
@@ -13,8 +13,6 @@ test/.env
 *.received.*
 *.swp

-vendor/
-
 website/.bundle
 website/vendor

@@ -85,7 +85,8 @@ run:
   # If invoked with -mod=vendor, the go command assumes that the vendor
   # directory holds the correct copies of dependencies and ignores
   # the dependency descriptions in go.mod.
-  modules-download-mode: readonly
+  modules-download-mode: vendor
+

 # output configuration options
 output:
@@ -1,17 +1,24 @@
-poll "label_issue_migrater" "remote_plugin_migrater" {
-  schedule                = "0 20 * * * *"
-  new_owner               = "hashicorp"
-  repo_prefix             = "packer-plugin-"
-  label_prefix            = "remote-plugin/"
-  excluded_label_prefixes = ["communicator/"]
-  excluded_labels         = ["build", "core", "new-plugin-contribution", "website"]
-
-  issue_header = <<-EOF
-    _This issue was originally opened by @${var.user} as ${var.repository}#${var.issue_number}. It was migrated here as a result of the [Packer plugin split](https://github.com/hashicorp/packer/issues/8610#issuecomment-770034737). The original body of the issue is below._
-
-    <hr>
-
-  EOF
-  migrated_comment = "This issue has been automatically migrated to ${var.repository}#${var.issue_number} because it looks like an issue with that plugin. If you believe this is _not_ an issue with the plugin, please reply to ${var.repository}#${var.issue_number}."
+behavior "regexp_issue_labeler" "panic_label" {
+  regexp = "panic:"
+  labels = ["crash", "bug"]
 }

+behavior "remove_labels_on_reply" "remove_stale" {
+  labels               = ["waiting-reply", "stale"]
+  only_non_maintainers = true
+}
+
+poll "closed_issue_locker" "locker" {
+  schedule             = "0 50 1 * * *"
+  closed_for           = "720h" # 30 days
+  max_issues           = 500
+  sleep_between_issues = "5s"
+  no_comment_if_no_activity_for = "4320h" # 180 days
+
+  message = <<-EOF
+    I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
+
+    If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
+  EOF
+}
412  CHANGELOG.md
@@ -1,411 +1,9 @@
-## 1.7.3 (Upcoming)
+## 1.6.6 (Upcoming)

 ### IMPROVEMENTS:

-Major refactor: Extracted a majority of HashiCorp-maintained and community plugins from the Packer Core repository. They now live in their own multi-component plugin repositories. The following repositories have been created, and their components have been deleted from the "github.com/hashicorp/packer" repository.
-
-* "github.com/hashicorp/packer-plugin-alicloud" [GH-10932]
-* "github.com/hashicorp/packer-plugin-amazon" [GH-10800]
-* "github.com/hashicorp/packer-plugin-ansible" [GH-10912]
-* "github.com/hashicorp/packer-plugin-azure" [GH-10979]
-* "github.com/hashicorp/packer-plugin-chef" [GH-10921]
-* "github.com/hashicorp/packer-plugin-cloudstack" [GH-10934]
-* "github.com/hashicorp/packer-plugin-converge" [GH-10956]
-* "github.com/hashicorp/packer-plugin-digitalocean" [GH-10961]
-* "github.com/hashicorp/packer-plugin-docker" [GH-10695]
-* "github.com/hashicorp/packer-plugin-googlecompute" [GH-10890]
-* "github.com/hashicorp/packer-plugin-hcloud" [GH-10966]
-* "github.com/hashicorp/packer-plugin-hyperone" [GH-10949]
-* "github.com/hashicorp/packer-plugin-hyperv" [GH-10949]
-* "github.com/hashicorp/packer-plugin-inspec"
-* "github.com/hashicorp/packer-plugin-ionos-cloud"
-* "github.com/hashicorp/packer-plugin-jdcloud" [GH-10946]
-* "github.com/hashicorp/packer-plugin-linode" [GH-10947]
-* "github.com/hashicorp/packer-plugin-lxc" [GH-10965]
-* "github.com/hashicorp/packer-plugin-lxd" [GH-10965]
-* "github.com/hashicorp/packer-plugin-ncloud" [GH-10937]
-* "github.com/hashicorp/packer-plugin-openstack" [GH-10933]
-* "github.com/hashicorp/packer-plugin-oracle" [GH-10962]
-* "github.com/hashicorp/packer-plugin-outscale" [GH-10941]
-* "github.com/hashicorp/packer-plugin-parallels" [GH-10936]
-* "github.com/hashicorp/packer-plugin-proxmox" [GH-10930]
-* "github.com/hashicorp/packer-plugin-puppet" [GH-10943]
-* "github.com/hashicorp/packer-plugin-qemu" [GH-10929]
-* "github.com/hashicorp/packer-plugin-salt"
-* "github.com/hashicorp/packer-plugin-scaleway" [GH-10939]
-* "github.com/hashicorp/packer-plugin-tencentcloud" [GH-10967]
-* "github.com/hashicorp/packer-plugin-triton" [GH-10963]
-* "github.com/hashicorp/packer-plugin-ucloud" [GH-10953]
-* "github.com/hashicorp/packer-plugin-vagrant" [GH-10960]
-* "github.com/hashicorp/packer-plugin-virtualbox" [GH-10910]
-* "github.com/hashicorp/packer-plugin-vmware" [GH-10920]
-* "github.com/hashicorp/packer-plugin-vsphere" [GH-10896]
-* "github.com/hashicorp/packer-plugin-yandex" [GH-10970]
-
-_This will not be a backwards-breaking change in v1.7.3_ because the extracted
-components are being vendored back into Packer. However, we encourage users to
-begin using `packer init` to download and install plugins to get the latest
-updates to each plugin, and to prepare for Packer v2.0 when we will stop
-vendoring the above plugins into the main Packer binary. The following
-components will not be removed from the main packer binary:
-
-* `null` builder
-* `file` builder
-* `breakpoint` provisioner
-
-* `file` provisioner
-* `powershell` provisioner
-* `shell` provisioner
-* `shell-local` provisioner
-* `sleep` provisioner
-* `windows-restart` provisioner
-* `windows-shell` provisioner
-
-* `artifice` post-processor
-* `checksum` post-processor
-* `compress` post-processor
-* `manifest` post-processor
-* `shell-local` post-processor
-
-### Bug Fixes:
-* builder/azure: Add `keep_os_disk` parameter to control OS disk deletion
-  [GH-10045]
-* builder/azure: Stop SIG timeout from being overridden by PollingDuration
-  [GH-10816]
-* builder/azure: Support shared image gallery storage account type [GH-10863]
-* builder/proxmox: Proxmox builder now uses the ipv4 address instead of always ipv6.
-  [GH-10858]
-* core/hcl: Fix invalid provisioner pause_before panic [GH-10978]
-* core: HCL "index" function now actually returns the index of the element
-  [GH-11008]
-* core: Implemented DEFAULT_NAME handling for datasource plugins [GH-11026]
-
-### Enhancements:
-
-* builder/azure: Added custom nicname and osdiskname [GH-10938]
-* builder/azure: Add support for shared image gallery storage account type
-  [GH-10863]
-* builder/digitalocean: support ecdsa, ed25519, dsa temporary key types.
-  [GH-10856]
-* builder/ncloud: Support ncloud vpc version [GH-10870]
-* post-processor/compress: Add bzip2 support to post-processor [GH-10867]
-* post-processor/googlecompute-import: Add Image Storage Locations field
-  [GH-10864]
-* Removed the golang "vendor" directory in favor of go modules. This should not
-  affect end users. [GH-10916]

-## 1.7.2 (April 05, 2021)
-
-### IMPROVEMENTS:
-
-* builder/alicloud: Add `ramrole` configuration to ECS instance. [GH-10845]
-
-### BUG FIXES:
-
-* builder/proxmox: Update Proxmox Go API to ensure only the first non-loopback
-  IPv4 address gets returned. [GH-10858]
-* builder/vsphere: Fix primary disk resize on clone. [GH-10848]
-* core: Fix bug where call to "packer version" sent output to stderr instead of
-  stdout. [GH-10850]
-
-## 1.7.1 (March 31, 2021)
-
-### NOTES:
-
-* builder/amazon: Has been vendored in this release and will no longer be
-  updated with Packer core. In Packer v1.8.0 the plugin will be removed
-  entirely. The `amazon` components will continue to work as expected until
-  then, but for the latest offerings of the Amazon plugin, users are
-  encouraged to use the `packer init` command to install the latest release
-  version. For more details see [Installing Packer
-  Plugins](https://www.packer.io/docs/plugins#installing-plugins)
-* builder/docker: Has been vendored in this release and will no longer be
-  updated with Packer core. In Packer v1.8.0 the plugin will be removed
-  entirely. The `docker` builder will continue to work as expected until
-  then, but for the latest offerings of the Docker plugin, users are
-  encouraged to use the `packer init` command to install the latest release
-  version. For more details see [Installing Packer
-  Plugins](https://www.packer.io/docs/plugins#installing-plugins)
-* darwin/arm64: Packer now includes the darwin/arm64 binary in its releases to
-  support the new OSX M1. [GH-10804]
-* post-processor/docker-\*: Have been vendored in this release and will no
-  longer be updated with Packer core. In Packer v1.8.0 the plugin will be
-  removed entirely. The `docker` builder will continue to work as expected
-  until then, but for the latest offerings of the Docker plugin, users are
-  encouraged to use the `packer init` command to install the latest release
-  version. For more details see [Installing Packer
-  Plugins](https://www.packer.io/docs/plugins#installing-plugins)
-* post-processor/exoscale-import: Has been vendored in this release and will no
-  longer be updated with Packer core. In Packer v1.8.0 the plugin will be
-  removed entirely. The `exoscale-import` post-processor will continue to
-  work as expected until then, but for the latest offerings of the Exoscale
-  plugin, users are encouraged to use the `packer init` command to install the
-  latest release version. For more details see [Exoscale Plugin
-  Repository](https://github.com/exoscale/packer-plugin-exoscale). [GH-10709]
-
-### IMPROVEMENTS
-* builder/amazon: allow creation of ebs snapshots without volumes. [GH-9591]
-* builder/amazon: Fix issue for multi-region AMI builds that fail when
-  encrypting with KMS and sharing across accounts. [GH-10754]
-* builder/azure: Add client_cert_token_timeout option. [GH-10528]
-* builder/google: Make Windows password timeout configurable. [GH-10727]
-* builder/google: Update public GCP image project as gce-uefi-images are
-  deprecated. [GH-10724]
-* builder/oracle-oci: Update Oracle Go SDK to add support for OCI flexible
-  shapes. [GH-10833]
-* builder/proxmox: Allow using API tokens for Proxmox authentication.
-  [GH-10797]
-* builder/qemu: Added firmware option. [GH-10683]
-* builder/scaleway: add support for timeout in shutdown step. [GH-10503]
-* builder/vagrant: Fix logging to be clearer when Vagrant builder overrides
-  values retrieved from vagrant's ssh_config call. [GH-10743]
-* builder/virtualbox: Added ISO builder option to create additional disks.
-  [GH-10674]
-* builder/virtualbox: Add options for nested virtualisation and RTC time base.
-  [GH-10736]
-* builder/virtualbox: Add template options for chipset, firmware, nic, graphics
-  controller, and audio controller. [GH-10671]
-* builder/virtualbox: Support for "virtio" storage and ISO drive. [GH-10632]
-* builder/vmware: Added "attach_snapshot" parameter to vmware vmx builder.
-  [GH-10651]
-* command/fmt: Adding recursive flag to formatter to format subdirectories.
-  [GH-10457]
-* core/hcl2: Add legacy_isotime function. [GH-10780]
-* core/hcl2: Add support for generating `dynamic` blocks within a `build`
-  block. [GH-10825]
-* core/hcl2: Add templatefile function. [GH-10776]
-* core/hcl2_upgrade: hcl2_upgrade command can now upgrade json var-files.
-  [GH-10676]
-* core/init: Add implicit required_plugin blocks feature. [GH-10732]
-* core: Add http_content option to serve variables from HTTP at preseed.
-  [GH-10801]
-* core: Change template parsing error to include warning about file extensions.
-  [GH-10652]
-* core: Update to gopsutil v3.21.1 to allow builds to work for darwin arm64.
-  [GH-10697]
-* provisioner/inspec: Allow non-zero exit codes for inspec provisioner.
-  [GH-10723]
-
-### BUG FIXES
-* builder/azure: Update builder to ensure proper clean up of Azure temporary
-  managed OS disks. [GH-10713]
-* builder/amazon: Update amazon SDK to fix an SSO login issue. [GH-10668]
-* builder/azure: Don't overwrite subscription id if unset. [GH-10659]
-* builder/azure: Set default for the parameter client_cert_token_timeout
-  [GH-10783]
-* builder/google: Add new configuration field `windows_password_timeout` to
-  allow user to set configurable timeouts. [GH-10727]
-* builder/hyperv: Make Packer respect winrm_host flag in winrm connect func.
-  [GH-10748]
-* builder/openstack: Make Packer respect winrm_host flag in winrm connect func.
-  [GH-10748]
-* builder/oracle-oci: Update Oracle Go SDK to fix issue with reading key file.
-  [GH-10560] [GH-10774]
-* builder/outscale: Fix omi_description that was ignored in Osc builder
-  [GH-10792]
-* builder/parallels: Make Packer respect winrm_host flag in winrm connect func.
-  [GH-10748]
-* builder/proxmox: Fixes issue when using `additional_iso_files` in HCL enabled
-  templates. [GH-10772]
-* builder/qemu: Make Packer respect winrm_host flag in winrm connect func.
-  [GH-10748]
-* builder/virtualbox: Make Packer respect winrm_host flag in winrm connect
-  func. [GH-10748]
-* builder/vmware: Added a fallback file check when trying to determine the
-  network-mapping configuration. [GH-10543]
-* builder/vsphere: Fix invalid device configuration issue when creating a
-  vm with multiple disks on the same controller. [GH-10844]
-* builder/vsphere: Fix issue where boot command would fail the build due to a
-  key typing error. This change will now retry to type the key on error
-  before giving up. [GH-10541]
-* core/hcl2_upgrade: Check for nil config map when provisioner/post-processor
-  doesn't have config. [GH-10730]
-* core/hcl2_upgrade: Fix escaped quotes in template functions [GH-10794]
-* core/hcl2_upgrade: Make hcl2_upgrade command correctly translate
-  pause_before. [GH-10654]
-* core/hcl2_upgrade: Make json variables using template engines get stored as
-  locals so they can be properly interpolated. [GH-10685]
-* core/init: Fixes issue where `packer init` was failing to install valid
-  plugins containing a 'v' within its name. [GH-10760]
-* core: Packer will now show a proper error message when failing to load the
-  contents of PACKER_CONFIG. [GH-10766]
-* core: Pin Packer to Golang 1.16 to fix code generation issues. [GH-10702]
-* core: Templates previously could not interpolate the environment variable
-  PACKER_LOG_PATH. [GH-10660]
-* post-processor/vagrant-cloud: Override direct upload based on box size
-  [GH-10820]
-* provisioner/chef-solo: HCL2 templates can support the json_string option.
-  [GH-10655]
-* provisioner/inspec: Add new configuration field `valid_exit_codes` to allow
-  for non-zero exit codes. [GH-10723]
-* provisioner/salt-masterless: Update urls for the bootstrap scripts used by
-  the salt-masterless provisioner. [GH-10755]

## 1.7.0 (February 17, 2021)
|
||||
|
||||
### FEATURES
|
||||
* **New Command** (HCL only) `packer init` command will download plugins defined
|
||||
in a new `required_plugins` block [GH-10304] [GH-10633].
|
||||
* **New Plugin Type** Data sources can be implemented (blog post forthcoming).
|
||||
[GH-10440]
|
||||
* **New Plugin** Aws Secrets Manager data source [GH-10505] [GH-10467]
|
||||
|
||||
### BACKWARDS INCOMPATIBILITIES
|
||||
* core: The API that the Packer core uses to communicate with community plugins
|
||||
has changed; maintainers of community plugins will need to upgrade their
|
||||
plugins in order to make them compatible with v1.7.0. An upgrade guide will
|
||||
be available on our guides page https://www.packer.io/guides.
|
||||
|
||||
### IMPROVEMENTS

* builder/amazon: Add `skip_create_ami` option for testing and situations where
  the artifact is not the AMI. [GH-10531]
* builder/amazon: Add IMDSv2 support for the AWS EBS builder. [GH-10546]
* builder/amazon: Add resource tags in the launch template used to request spot
  instances. [GH-10456]
* builder/openstack: Add `skip_create_image` option for testing and situations
  where the artifact is not the image. [GH-10496]
* builder/oracle-oci: Add retry strategies to OCI calls. [GH-10591]
* core/fmt: The `packer fmt` command can now read from stdin. [GH-10500]
* core/hcl: Add regex and regexall hcl2 template functions (see the sketch
  after this list). [GH-10601]
* core/hcl: Templates now support "sensitive" locals. [GH-10509]
* core/hcl: Templates now support error-cleanup-provisioner. [GH-10604]
* hcl2_upgrade: Command now comes with a flag so you can control whether output
  templates are annotated with helpful comments. [GH-10619]
* hcl2_upgrade: Command now gracefully handles options with template engine
  interpolations. [GH-10625]
* hcl2_upgrade: Command will convert amazon filters to use the ami data source.
  [GH-10491]

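The new regex functions follow the familiar Terraform semantics; a quick
illustrative sketch (values are made up):

```hcl
locals {
  # regex returns the first match: "ubuntu"
  distro = regex("[a-z]+", "ubuntu-20.04")

  # regexall returns every match as a list: ["20", "04"]
  numbers = regexall("[0-9]+", "ubuntu-20.04")
}
```
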
### BUG FIXES

* amazon/ebssurrogate: Apply snapshot tags at the same time as when taking the
  snapshot. [GH-10150]
* builder/amazon: Fix bug where validation fails if the optional iops value is
  unset. [GH-10518]
* builder/amazon: Wrap API call to get filtered image in a retry. [GH-10610]
* builder/bsusurrogate: Override bsu when omi root device is set. [GH-10490]
* builder/google: Fix bug where Packer would fail when run by users who do not
  have permission to access the metadata, even though the metadata is not
  necessary to the run. [GH-10458]
* builder/profitbricks: Profitbricks builder could not connect using the SSH
  communicator. [GH-10549]
* builder/proxmox: Ensure ISOs in additional_iso_files are mounted during VM
  creation. [GH-10586]
* builder/proxmox: Improve cloud-init error logging for the proxmox builder.
  [GH-10499]
* builder/qemu: Fix bug where vnc_min_port set to a value greater than 5900
  could prevent Packer from connecting to QEMU. [GH-10450] [GH-10451]
* builder/qemu: Fix regression with cd indexing when disk_interface is `ide`.
  [GH-10519]
* builder/vmware-esx: Skip credential validation, which requires ovftool to be
  installed, if we are not exporting an image. [GH-10520]
* builder/yandex: Fix cloud-init config for Ubuntu 20.04. [GH-10522]
* builder/yandex: Fix incorrect access to `instance_id`. [GH-10522]
* core/hcl: Fix bug where []uint8 types could not be passed to plugins.
  [GH-10516]
* core/hcl: Fix force flag for hcl2 provisioners and post-processors.
  [GH-10571]
* post-processor/vsphere: Fix regression where Packer would not check the exit
  status after streaming UI from the ovftool command. [GH-10468]
* post-processor/yandex-export: Changed dhclient command and supported
  configuring disk for the export update-dump-method. Also added support for
  the `file` builder. [GH-10488]

## 1.6.6 (December 16, 2020)

### FEATURES

* **New command** `fmt` allows users to format existing HCL2 configuration
  files into a canonical style. Please see the [fmt command
  docs](https://packer.io/docs/commands/fmt) for more details. [GH-10225]
  [GH-10377]
* **New function** `env` allows users to set the default value of a variable to
  the value of an environment variable (see the sketch after this list). Please
  see the [env function
  docs](https://www.packer.io/docs/templates/hcl_templates/functions/contextual/env)
  for more details. [GH-10240]
* **Future Scaffolding** This release contains a large number of no-op
  refactoring changes. The Packer team at HashiCorp is preparing to split the
  plugins and core to make it easier for our third party maintainers and
  community members to release and maintain plugins, just like HashiCorp did
  with the Terraform Core-Provider split. The Packer team is committed to
  making sure that this split is seamless for our users and for our community
  maintainers -- if you are a community maintainer, you may want to follow
  along with some of the work by looking at the
  [core-plugin-split github tag](https://github.com/hashicorp/packer/pulls?q=is%3Apr+label%3Acore-plugin-split).
  No one needs to do anything yet, but we felt it was worth calling out all
  the work that isn't making it into the changelog. We will be following up
  with lots of documentation and communication in early 2021 with more
  information.

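A minimal sketch of the new `env` function, which is only valid inside a
variable's default (the variable name and environment variable here are
illustrative):

```hcl
variable "region" {
  type    = string
  default = env("AWS_DEFAULT_REGION")
}
```
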
### IMPROVEMENTS

* builder/amazon-ebs: Add tags to launch templates. [GH-10203]
* builder/amazon: Add support for Amazon EBS gp3 volumes. [GH-10338]
* builder/amazon: Increase default max_retries to lessen throttling issues.
  [GH-10290]
* builder/amazon: Support root volume encryption for amazon-chroot. [GH-10243]
* builder/amazon: Validate IOPS ratio. [GH-10199]
* builder/azure-arm: Add Azure CLI authentication support to the builder.
  [GH-10157]
* builder/azure-arm: Create keyvaults with SoftDelete enabled. [GH-10210]
* builder/digitalocean: New option to provision with a private IP. [GH-10093]
* builder/google: Add `wait_to_add_ssh_keys` option to delay the addition of
  SSH configuration that may be disrupted during an instance boot sequence.
  [GH-10320]
* builder/google: Add support for creating shielded VMs. [GH-10172]
* builder/googlecompute-export: Add logging.write to service account scopes.
  [GH-10316]
* builder/oracle-oci: Support image launch mode. [GH-10212]
* builder/outscale: Add outscale.hk endpoint support. [GH-10207]
* builder/outscale: Add x509 certificate support. [GH-10161]
* builder/proxmox: New config option for boot-order. [GH-10260]
* builder/scaleway: Use the SDK functions to load profile from file and env.
  [GH-10181]
* builder/virtualbox: Allow attaching guest additions with the "none"
  communicator. [GH-10306]
* builder/vmware: Make compatible with macOS Big Sur by using Apple DHCP leases
  instead of VMware leases. [GH-10384]
* builder/vsphere: New option to add additional storage to a cloned VM.
  [GH-10287]
* builder/yandex: More resilient image mounting and initialization. [GH-10335]
* builder/yandex: Update user-data to not use cloud-config fields to prevent
  possible user data collisions. [GH-10385]
* core/hcl: Update the `hcl2_upgrade` command to support complex variable
  values and packer version blocks. [GH-10221]
* hcl2_upgrade: Update command to fix `env` call upgrade. [GH-10244]
* post-processor/vagrant-cloud: Add support for uploading directly to storage
  on Vagrant Cloud. [GH-10193]
* post-processor/yandex-export: Add retries and wait after disk attach
  operation. [GH-10303]
* post-processor/yandex-export: Show progress on export. [GH-10368]
* post-processor/yandex-export: Use the ssh communicator in export. [GH-10352]
* post-processor/yandex-export: Verify the access to a specific bucket.
  [GH-10188]
* provisioner/salt-masterless: Call winrepo.update_git_repos and
  pkg.refresh_db. [GH-10201]

### BUG FIXES

* builder/amazon: Fix retry logic in AWS spot instance tagging. [GH-10394]
* builder/amazon: Fix single `tag` interpolation to allow for templating engine
  usage. [GH-10224]
* builder/google: Fix crash when using the `-on-error` build flag. [GH-10247]
* builder/google: Fix issue with service account detection when running Packer
  on a compute instance with `use_os_login` enabled. [GH-10360]
* builder/qemu: Fix duplication of main disk when setting "disk_image: true".
  [GH-10337]
* builder/qemu: Fix nil pointer dereference when loading values from state.
  [GH-10249]
* builder/qemu: Fix panic when disk_image=true and the source image has no file
  extension. [GH-10226]
* builder/vagrant: Return error if the ssh-config command fails. [GH-10213]
* builder/vsphere: WaitForIP should not return an error if an IP is not found.
  [GH-10321]
* builder/yandex: Change disk creation method to manual. [GH-10250]
* builder/yandex: Fix issue with UserAgent string. [GH-10361]
* builder/yandex: Fix using cloud config when using IPv6. [GH-10297]
* core/hcl: Ensure the `reverse` function does not break when given a value of
  type list. [GH-10380]
* post-processor/yandex-export: Check service account id. [GH-10305]

## 1.6.5 (October 30, 2020)


CODEOWNERS
@ -13,12 +13,15 @@
/builder/digitalocean/ @andrewsomething
/website/pages/docs/builders/digitalocean* @andrewsomething

/builder/hyperv/ @taliesins
/website/pages/docs/builders/hyperv* @taliesins

/examples/jdcloud/ @XiaohanLiang @remrain
/builder/jdcloud/ @XiaohanLiang @remrain
/website/pages/docs/builders/jdcloud* @XiaohanLiang @remrain

/builder/linode/ @stvnjacobs @charliekenney23 @phillc
/website/pages/docs/builders/linode* @stvnjacobs @charliekenney23 @phillc
/builder/linode/ @displague @ctreatma @stvnjacobs @charliekenney23 @phillc
/website/pages/docs/builders/linode* @displague @ctreatma @stvnjacobs @charliekenney23 @phillc

/builder/lxc/ @ChrisLundquist
/website/pages/docs/builders/lxc* @ChrisLundquist
@ -34,12 +37,15 @@
/builder/oracle/ @prydie @owainlewis
/website/pages/docs/builders/oracle* @prydie @owainlewis

/builder/profitbricks/ @LiviusP @mflorin
/website/pages/docs/builders/profitbricks* @LiviusP @mflorin
/builder/profitbricks/ @jasmingacic
/website/pages/docs/builders/profitbricks* @jasmingacic

/builder/triton/ @sean-
/website/pages/docs/builders/triton* @sean-

/builder/ncloud/ @YuSungDuk
/website/pages/docs/builders/ncloud* @YuSungDuk

/builder/proxmox/ @carlpett
/website/pages/docs/builders/proxmox* @carlpett

@ -49,13 +55,28 @@
/builder/hcloud/ @LKaemmerling
/website/pages/docs/builders/hcloud* @LKaemmerling

/examples/hyperone/ @m110 @gregorybrzeski @ad-m
/builder/hyperone/ @m110 @gregorybrzeski @ad-m
/website/pages/docs/builders/hyperone* @m110 @gregorybrzeski @ad-m
/test/builder_hyperone* @m110 @gregorybrzeski @ad-m
/test/fixtures/builder-hyperone/ @m110 @gregorybrzeski @ad-m

/examples/ucloud/ @shawnmssu
/builder/ucloud/ @shawnmssu
/website/pages/docs/builders/ucloud* @shawnmssu

/builder/yandex/ @GennadySpb @alexanderKhaustov @seukyaso
/website/pages/docs/builders/yandex* @GennadySpb @alexanderKhaustov @seukyaso

/builder/osc/ @marinsalinas @Hakujou
/website/pages/docs/builders/osc* @marinsalinas @Hakujou

/examples/tencentcloud/ @likexian
/builder/tencentcloud/ @likexian
/website/pages/docs/builders/tencentcloud* @likexian


# provisioners

/examples/ansible/ @bhcleek
@ -66,7 +87,9 @@

/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
/post-processor/checksum/ v.tolstov@selfip.ru
/post-processor/exoscale-import/ @falzm @mcorbin
/post-processor/googlecompute-export/ crunkleton@google.com
/post-processor/yandex-export/ @GennadySpb
/post-processor/yandex-import/ @GennadySpb
/post-processor/vsphere-template/ nelson@bennu.cl
/post-processor/ucloud-import/ @shawnmssu

Makefile
@ -1,7 +1,6 @@
TEST?=$(shell go list ./...)
COUNT?=1
VET?=$(shell go list ./...)

ACC_TEST_BUILDERS?=all
ACC_TEST_PROVISIONERS?=all
# Get the current full sha from git
@ -49,7 +48,7 @@ package:
    @sh -c "$(CURDIR)/scripts/dist.sh $(VERSION)"

install-build-deps: ## Install dependencies for bin build
    @go install github.com/mitchellh/gox@v1.0.1
    @go get github.com/mitchellh/gox

install-gen-deps: ## Install dependencies for code generation
    # to avoid having to tidy our go deps, we `go get` our binaries from a temp
@ -57,8 +56,10 @@ install-gen-deps: ## Install dependencies for code generation
    # out code dependencies; so a go mod tidy will remove them again. `go
    # install` seems to install the last tagged version and we want to install
    # master.
    @(cd $(TEMPDIR) && GO111MODULE=on go get github.com/mna/pigeon@master)
    @(cd $(TEMPDIR) && GO111MODULE=on go get github.com/alvaroloes/enumer@master)
    @go install github.com/hashicorp/packer-plugin-sdk/cmd/packer-sdc@latest
    @go install ./cmd/struct-markdown
    @go install ./cmd/mapstructure-to-hcl2

install-lint-deps: ## Install linter dependencies
    # Pinning golangci-lint at v1.23.8 as --new-from-rev seems to work properly; the latest 1.24.0 has caused issues with memory consumption
@ -119,11 +120,13 @@ fmt-examples:
# generate runs `go generate` to build the dynamically generated
# source files.
generate: install-gen-deps ## Generate dynamically generated code
    @echo "==> removing autogenerated markdown..." # but don't remove partials generated in the SDK and copied over.
    @find website/pages -path website/pages/partials/packer-plugin-sdk -prune -o -type f | xargs grep -l '^<!-- Code generated' | xargs rm -f
    @echo "==> removing autogenerated markdown..."
    @find website/pages/ -type f | xargs grep -l '^<!-- Code generated' | xargs rm -f
    @echo "==> removing autogenerated code..."
    @find post-processor helper builder provisioner -type f | xargs grep -l '^// Code generated' | xargs rm -f
    PROJECT_ROOT="$(shell pwd)" go generate $(shell go list ./... | grep -v packer-plugin-sdk)
    @find post-processor common helper template builder provisioner -type f | xargs grep -l '^// Code generated' | xargs rm -f
    go generate ./...
    go fmt common/bootcommand/boot_command.go
    go run ./cmd/generate-fixer-deprecations

generate-check: generate ## Check go code generation is on par
    @echo "==> Checking that auto-generated code is not changed..."
@ -137,16 +140,16 @@ test: mode-check vet ## Run unit tests
    @go test -count $(COUNT) $(TEST) $(TESTARGS) -timeout=3m

# acctest runs provisioners acceptance tests
provisioners-acctest: #install-build-deps generate
    ACC_TEST_BUILDERS=$(ACC_TEST_BUILDERS) go test $(TEST) $(TESTARGS) -timeout=1h
provisioners-acctest: install-build-deps generate
    ACC_TEST_BUILDERS=$(ACC_TEST_BUILDERS) ACC_TEST_PROVISIONERS=$(ACC_TEST_PROVISIONERS) go test ./provisioner/... -timeout=1h

# testacc runs acceptance tests
testacc: # install-build-deps generate ## Run acceptance tests
testacc: install-build-deps generate ## Run acceptance tests
    @echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
    PACKER_ACC=1 go test -count $(COUNT) -v $(TEST) $(TESTARGS) -timeout=120m

testrace: mode-check vet ## Test with race detection enabled
    @go test -count $(COUNT) -race $(TEST) $(TESTARGS) -timeout=3m -p=8
    @GO111MODULE=off go test -count $(COUNT) -race $(TEST) $(TESTARGS) -timeout=3m -p=8

# Runs code coverage and open a html page with report
cover:
@ -154,6 +157,13 @@ cover:
    go tool cover -html=coverage.out
    rm coverage.out

check-vendor-vs-mod: ## Check that go modules and vendored code are on par
    @GO111MODULE=on go mod vendor
    @git diff --exit-code --ignore-space-change --ignore-space-at-eol -- vendor ; if [ $$? -eq 1 ]; then \
        echo "ERROR: vendor dir is not on par with go modules definition." && \
        exit 1; \
    fi

vet: ## Vet Go code
    @go vet $(VET) ; if [ $$? -eq 1 ]; then \
        echo "ERROR: Vet found problems in the code."; \
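For orientation, the acceptance-test targets above are gated on the env
toggles they reference; a hypothetical invocation (the builder and provisioner
names are illustrative):

```
$ ACC_TEST_BUILDERS=amazon-ebs ACC_TEST_PROVISIONERS=shell make provisioners-acctest
```
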

README.md
@ -1,23 +1,23 @@
# Packer

[![Build Status][circleci-badge]][circleci]
[](https://discuss.hashicorp.com/c/packer)
[![Windows Build Status][appveyor-badge]][appveyor]
[](https://pkg.go.dev/github.com/hashicorp/packer)
[![GoReportCard][report-badge]][report]
[](https://codecov.io/gh/hashicorp/packer)

[circleci-badge]: https://circleci.com/gh/hashicorp/packer.svg?style=svg
[circleci]: https://app.circleci.com/pipelines/github/hashicorp/packer
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
[godoc-badge]: https://godoc.org/github.com/hashicorp/packer?status.svg
[godoc]: https://godoc.org/github.com/hashicorp/packer
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
[report]: https://goreportcard.com/report/github.com/hashicorp/packer

<p align="center" style="text-align:center;">
  <a href="https://www.packer.io">
    <img alt="HashiCorp Packer logo" src="website/public/img/logo-packer-padded.svg" width="500" />
  </a>
</p>

* Website: https://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](https://groups.google.com/forum/#!forum/packer-tool)

Packer is a tool for building identical machine images for multiple platforms
from a single source configuration.
@ -25,7 +25,7 @@ from a single source configuration.
Packer is lightweight, runs on every major operating system, and is highly
performant, creating machine images for multiple platforms in parallel. Packer
comes out of the box with support for many platforms, the full list of which can
be found at https://www.packer.io/docs/builders.
be found at https://www.packer.io/docs/builders/index.html.

Support for other platforms can be added via plugins.

@ -47,43 +47,33 @@ yourself](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.m

After Packer is installed, create your first template, which tells Packer
what platforms to build images for and how you want to build them. In our
case, we'll create a simple AMI that has Redis pre-installed.

Save this file as `quick-start.pkr.hcl`. Export your AWS credentials as the
case, we'll create a simple AMI that has Redis pre-installed. Save this
file as `quick-start.json`. Export your AWS credentials as the
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.

```hcl
variable "access_key" {
  type    = string
  default = "${env("AWS_ACCESS_KEY_ID")}"
}

variable "secret_key" {
  type    = string
  default = "${env("AWS_SECRET_ACCESS_KEY")}"
}

locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") }

source "amazon-ebs" "quick-start" {
  access_key    = "${var.access_key}"
  ami_name      = "packer-example ${local.timestamp}"
  instance_type = "t2.micro"
  region        = "us-east-1"
  secret_key    = "${var.secret_key}"
  source_ami    = "ami-af22d9b9"
  ssh_username  = "ubuntu"
}

build {
  sources = ["source.amazon-ebs.quick-start"]
```json
{
  "variables": {
    "access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
    "secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
  },
  "builders": [{
    "type": "amazon-ebs",
    "access_key": "{{user `access_key`}}",
    "secret_key": "{{user `secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-af22d9b9",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-example {{timestamp}}"
  }]
}
```

Next, tell Packer to build the image:

```
$ packer build quick-start.pkr.hcl
$ packer build quick-start.json
...
```

@ -95,9 +85,11 @@ they're run, etc., is up to you.

## Documentation

Comprehensive documentation is viewable on the Packer website at https://www.packer.io/docs.
Comprehensive documentation is viewable on the Packer website:

## Contributing to Packer
https://www.packer.io/docs

## Developing Packer

See
[CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
@ -1,71 +0,0 @@
// component_acc_test.go should contain acceptance tests for plugin components
// to make sure all component types can be discovered and started.
package plugin

import (
    _ "embed"
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "testing"

    amazonacc "github.com/hashicorp/packer-plugin-amazon/builder/ebs/acceptance"
    "github.com/hashicorp/packer-plugin-sdk/acctest"
    "github.com/hashicorp/packer/hcl2template/addrs"
)

//go:embed test-fixtures/basic-amazon-ami-datasource.pkr.hcl
var basicAmazonAmiDatasourceHCL2Template string

func TestAccInitAndBuildBasicAmazonAmiDatasource(t *testing.T) {
    plugin := addrs.Plugin{
        Hostname:  "github.com",
        Namespace: "hashicorp",
        Type:      "amazon",
    }
    testCase := &acctest.PluginTestCase{
        Name: "amazon-ami_basic_datasource_test",
        Setup: func() error {
            return cleanupPluginInstallation(plugin)
        },
        Teardown: func() error {
            helper := amazonacc.AWSHelper{
                Region:  "us-west-2",
                AMIName: "packer-amazon-ami-test",
            }
            return helper.CleanUpAmi()
        },
        Template: basicAmazonAmiDatasourceHCL2Template,
        Type:     "amazon-ami",
        Init:     true,
        CheckInit: func(initCommand *exec.Cmd, logfile string) error {
            if initCommand.ProcessState != nil {
                if initCommand.ProcessState.ExitCode() != 0 {
                    return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
                }
            }
            logs, err := os.Open(logfile)
            if err != nil {
                return fmt.Errorf("Unable to find %s", logfile)
            }
            defer logs.Close()

            logsBytes, err := ioutil.ReadAll(logs)
            if err != nil {
                return fmt.Errorf("Unable to read %s", logfile)
            }
            initOutput := string(logsBytes)
            return checkPluginInstallation(initOutput, plugin)
        },
        Check: func(buildCommand *exec.Cmd, logfile string) error {
            if buildCommand.ProcessState != nil {
                if buildCommand.ProcessState.ExitCode() != 0 {
                    return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
                }
            }
            return nil
        },
    }
    acctest.TestPlugin(t, testCase)
}
@ -1,112 +0,0 @@
// plugin_acc_test.go should contain acceptance tests for features related to
// installing, discovering and running plugins.
package plugin

import (
    _ "embed"
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "regexp"
    "testing"

    amazonacc "github.com/hashicorp/packer-plugin-amazon/builder/ebs/acceptance"
    "github.com/hashicorp/packer-plugin-sdk/acctest"
    "github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
    "github.com/hashicorp/packer/hcl2template/addrs"
    "github.com/mitchellh/go-homedir"
)

//go:embed test-fixtures/basic-amazon-ebs.pkr.hcl
var basicAmazonEbsHCL2Template string

func TestAccInitAndBuildBasicAmazonEbs(t *testing.T) {
    plugin := addrs.Plugin{
        Hostname:  "github.com",
        Namespace: "hashicorp",
        Type:      "amazon",
    }
    testCase := &acctest.PluginTestCase{
        Name: "amazon-ebs_basic_plugin_init_and_build_test",
        Setup: func() error {
            return cleanupPluginInstallation(plugin)
        },
        Teardown: func() error {
            helper := amazonacc.AWSHelper{
                Region:  "us-east-1",
                AMIName: "packer-plugin-amazon-ebs-test",
            }
            return helper.CleanUpAmi()
        },
        Template: basicAmazonEbsHCL2Template,
        Type:     "amazon-ebs",
        Init:     true,
        CheckInit: func(initCommand *exec.Cmd, logfile string) error {
            if initCommand.ProcessState != nil {
                if initCommand.ProcessState.ExitCode() != 0 {
                    return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
                }
            }
            logs, err := os.Open(logfile)
            if err != nil {
                return fmt.Errorf("Unable to find %s", logfile)
            }
            defer logs.Close()

            logsBytes, err := ioutil.ReadAll(logs)
            if err != nil {
                return fmt.Errorf("Unable to read %s", logfile)
            }
            initOutput := string(logsBytes)
            return checkPluginInstallation(initOutput, plugin)
        },
        Check: func(buildCommand *exec.Cmd, logfile string) error {
            if buildCommand.ProcessState != nil {
                if buildCommand.ProcessState.ExitCode() != 0 {
                    return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
                }
            }
            return nil
        },
    }
    acctest.TestPlugin(t, testCase)
}

func cleanupPluginInstallation(plugin addrs.Plugin) error {
    home, err := homedir.Dir()
    if err != nil {
        return err
    }
    pluginPath := filepath.Join(home,
        ".packer.d",
        "plugins",
        plugin.Hostname,
        plugin.Namespace,
        plugin.Type)
    testutils.CleanupFiles(pluginPath)
    return nil
}

func checkPluginInstallation(initOutput string, plugin addrs.Plugin) error {
    expectedInitLog := "Installed plugin " + plugin.String()
    if matched, _ := regexp.MatchString(expectedInitLog+".*", initOutput); !matched {
        return fmt.Errorf("logs don't contain expected output %q", initOutput)
    }

    home, err := homedir.Dir()
    if err != nil {
        return err
    }
    pluginPath := filepath.Join(home,
        ".packer.d",
        "plugins",
        plugin.Hostname,
        plugin.Namespace,
        plugin.Type)
    if !testutils.FileExists(pluginPath) {
        return fmt.Errorf("%s plugin installation not found", plugin.String())
    }
    return nil
}
@ -1,33 +0,0 @@
packer {
  required_plugins {
    amazon = {
      version = ">= 0.0.1"
      source  = "github.com/hashicorp/amazon"
    }
  }
}

data "amazon-ami" "test" {
  filters = {
    name                = "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*"
    root-device-type    = "ebs"
    virtualization-type = "hvm"
  }
  most_recent = true
  owners      = ["099720109477"]
}

source "amazon-ebs" "basic-example" {
  region        = "us-west-2"
  source_ami    = data.amazon-ami.test.id
  ami_name      = "packer-amazon-ami-test"
  communicator  = "ssh"
  instance_type = "t2.micro"
  ssh_username  = "ubuntu"
}

build {
  sources = [
    "source.amazon-ebs.basic-example"
  ]
}
@ -1,20 +0,0 @@
packer {
  required_plugins {
    amazon = {
      version = ">= 0.0.1"
      source  = "github.com/hashicorp/amazon"
    }
  }
}

source "amazon-ebs" "basic-test" {
  region        = "us-east-1"
  instance_type = "m3.medium"
  source_ami    = "ami-76b2a71e"
  ssh_username  = "ubuntu"
  ami_name      = "packer-plugin-amazon-ebs-test"
}

build {
  sources = ["source.amazon-ebs.basic-test"]
}
@ -1,224 +0,0 @@
package acctest

import (
    "context"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "strings"
    "testing"

    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/template"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/provisioner/file"
    shellprovisioner "github.com/hashicorp/packer/provisioner/shell"
)

// TestEnvVar must be set to a non-empty value for acceptance tests to run.
const TestEnvVar = "PACKER_ACC"

// TestCase is a single set of tests to run for a backend. A TestCase
// should generally map 1:1 to each test method for your acceptance
// tests.
type TestCase struct {
    // Precheck, if non-nil, will be called once before the test case
    // runs at all. This can be used for some validation prior to the
    // test running.
    PreCheck func()

    // Builder is the Builder that will be tested. It will be available
    // as the "test" builder in the template.
    Builder packersdk.Builder

    // Template is the template contents to use.
    Template string

    // Check is called after this step is executed in order to test that
    // the step executed successfully. If this is not set, then the next
    // step will be called
    Check TestCheckFunc

    // Teardown will be called before the test case is over regardless
    // of if the test succeeded or failed. This should return an error
    // in the case that the test can't guarantee all resources were
    // properly cleaned up.
    Teardown TestTeardownFunc

    // If SkipArtifactTeardown is true, we will not attempt to destroy the
    // artifact created in this test run.
    SkipArtifactTeardown bool
    // If set, overrides the default provisioner store with custom provisioners.
    // This can be useful for running acceptance tests for a particular
    // provisioner using a specific builder.
    // Default provisioner store:
    // ProvisionerStore: packersdk.MapOfProvisioner{
    //  "shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
    //  "file":  func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
    // },
    ProvisionerStore packersdk.MapOfProvisioner
}

// TestCheckFunc is the callback used for Check in TestStep.
type TestCheckFunc func([]packersdk.Artifact) error

// TestTeardownFunc is the callback used for Teardown in TestCase.
type TestTeardownFunc func() error

// TestT is the interface used to handle the test lifecycle of a test.
//
// Users should just use a *testing.T object, which implements this.
type TestT interface {
    Error(args ...interface{})
    Fatal(args ...interface{})
    Skip(args ...interface{})
}

type TestBuilderSet struct {
    packer.BuilderSet
    StartFn func(name string) (packersdk.Builder, error)
}

func (tbs TestBuilderSet) Start(name string) (packersdk.Builder, error) { return tbs.StartFn(name) }

// Test performs an acceptance test on a backend with the given test case.
//
// Tests are not run unless an environmental variable "PACKER_ACC" is
// set to some non-empty value. This is to avoid test cases surprising
// a user by creating real resources.
//
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
// the "-test.v" flag) is set. Because some acceptance tests take quite
// long, we require the verbose flag so users are able to see progress
// output.
func Test(t TestT, c TestCase) {
    // We only run acceptance tests if an env var is set because they're
    // slow and generally require some outside configuration.
    if os.Getenv(TestEnvVar) == "" {
        t.Skip(fmt.Sprintf(
            "Acceptance tests skipped unless env '%s' set",
            TestEnvVar))
        return
    }

    // We require verbose mode so that the user knows what is going on.
    if !testTesting && !testing.Verbose() {
        t.Fatal("Acceptance tests must be run with the -v flag on tests")
        return
    }

    // Run the PreCheck if we have it
    if c.PreCheck != nil {
        c.PreCheck()
    }

    // Parse the template
    log.Printf("[DEBUG] Parsing template...")
    tpl, err := template.Parse(strings.NewReader(c.Template))
    if err != nil {
        t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
        return
    }

    if c.ProvisionerStore == nil {
        c.ProvisionerStore = packersdk.MapOfProvisioner{
            "shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
            "file":  func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
        }
    }
    // Build the core
    log.Printf("[DEBUG] Initializing core...")
    core := packer.NewCore(&packer.CoreConfig{
        Components: packer.ComponentFinder{
            PluginConfig: &packer.PluginConfig{
                Builders: TestBuilderSet{
                    StartFn: func(n string) (packersdk.Builder, error) {
                        if n == "test" {
                            return c.Builder, nil
                        }

                        return nil, nil
                    },
                },
                Provisioners: c.ProvisionerStore,
            },
        },
        Template: tpl,
    })
    err = core.Initialize()
    if err != nil {
        t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
        return
    }

    // Get the build
    log.Printf("[DEBUG] Retrieving 'test' build")
    build, err := core.Build("test")
    if err != nil {
        t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err))
        return
    }

    // Prepare it
    log.Printf("[DEBUG] Preparing 'test' build")
    warnings, err := build.Prepare()
    if err != nil {
        t.Fatal(fmt.Sprintf("Prepare error: %s", err))
        return
    }
    if len(warnings) > 0 {
        t.Fatal(fmt.Sprintf(
            "Prepare warnings:\n\n%s",
            strings.Join(warnings, "\n")))
        return
    }

    // Run it! We use a temporary directory for caching and discard
    // any UI output. We discard since it shows up in logs anyways.
    log.Printf("[DEBUG] Running 'test' build")
    ui := &packersdk.BasicUi{
        Reader:      os.Stdin,
        Writer:      ioutil.Discard,
        ErrorWriter: ioutil.Discard,
        PB:          &packersdk.NoopProgressTracker{},
    }
    artifacts, err := build.Run(context.Background(), ui)
    if err != nil {
        t.Fatal(fmt.Sprintf("Run error:\n\n%s", err))
        goto TEARDOWN
    }

    // Check function
    if c.Check != nil {
        log.Printf("[DEBUG] Running check function")
        if err := c.Check(artifacts); err != nil {
            t.Fatal(fmt.Sprintf("Check error:\n\n%s", err))
            goto TEARDOWN
        }
    }

TEARDOWN:
    if !c.SkipArtifactTeardown {
        // Delete all artifacts
        for _, a := range artifacts {
            if err := a.Destroy(); err != nil {
                t.Error(fmt.Sprintf(
                    "!!! ERROR REMOVING ARTIFACT '%s': %s !!!",
                    a.String(), err))
            }
        }
    }

    // Teardown
    if c.Teardown != nil {
        log.Printf("[DEBUG] Running teardown function")
        if err := c.Teardown(); err != nil {
            t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err))
            return
        }
    }
}

// This is for unit tests of this package.
var testTesting = false
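
The helper above is invoked from a component's own test file; a minimal sketch
of a builder acceptance test under assumed names (the `Builder` type, the
template constant, and the import path of the acctest package are all
hypothetical):

```go
package mybuilder

import (
    "fmt"
    "testing"

    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    // import path for the acctest package shown above is assumed here
    "github.com/hashicorp/packer/acctest"
)

// A JSON template whose single builder must be of type "test"; the
// acctest core maps "test" back to the Builder passed in the TestCase.
const testAccBasicTemplate = `{
  "builders": [{"type": "test"}]
}`

func TestBuilderAcc_basic(t *testing.T) {
    acctest.Test(t, acctest.TestCase{
        Builder:  &Builder{}, // hypothetical builder under test
        Template: testAccBasicTemplate,
        Check: func(artifacts []packersdk.Artifact) error {
            if len(artifacts) != 1 {
                return fmt.Errorf("expected exactly one artifact, got %d", len(artifacts))
            }
            return nil
        },
    })
}
```

Unless PACKER_ACC is set (and `go test -v` is used), the test skips, matching
the guard at the top of Test.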

builder/alicloud/ecs/access_config.go
@ -0,0 +1,226 @@
//go:generate struct-markdown

package ecs

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "runtime"
    "time"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/builder/alicloud/version"
    "github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
    "github.com/mitchellh/go-homedir"
)

// Config of alicloud
type AlicloudAccessConfig struct {
    // Alicloud access key must be provided unless `profile` is set, but it can
    // also be sourced from the `ALICLOUD_ACCESS_KEY` environment variable.
    AlicloudAccessKey string `mapstructure:"access_key" required:"true"`
    // Alicloud secret key must be provided unless `profile` is set, but it can
    // also be sourced from the `ALICLOUD_SECRET_KEY` environment variable.
    AlicloudSecretKey string `mapstructure:"secret_key" required:"true"`
    // Alicloud region must be provided unless `profile` is set, but it can
    // also be sourced from the `ALICLOUD_REGION` environment variable.
    AlicloudRegion string `mapstructure:"region" required:"true"`
    // The region validation can be skipped if this value is true, the default
    // value is false.
    AlicloudSkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
    // The image validation can be skipped if this value is true, the default
    // value is false.
    AlicloudSkipImageValidation bool `mapstructure:"skip_image_validation" required:"false"`
    // Alicloud profile must be set unless `access_key` is set; it can also be
    // sourced from the `ALICLOUD_PROFILE` environment variable.
    AlicloudProfile string `mapstructure:"profile" required:"false"`
    // Alicloud shared credentials file path. If this file exists, access and
    // secret keys will be read from this file.
    AlicloudSharedCredentialsFile string `mapstructure:"shared_credentials_file" required:"false"`
    // STS access token, can be set through template or by exporting as
    // environment variable such as `export SECURITY_TOKEN=value`.
    SecurityToken string `mapstructure:"security_token" required:"false"`

    client *ClientWrapper
}

const Packer = "HashiCorp-Packer"
const DefaultRequestReadTimeout = 10 * time.Second

// Client for AlicloudClient
func (c *AlicloudAccessConfig) Client() (*ClientWrapper, error) {
    if c.client != nil {
        return c.client, nil
    }
    if c.SecurityToken == "" {
        c.SecurityToken = os.Getenv("SECURITY_TOKEN")
    }

    var getProviderConfig = func(str string, key string) string {
        value, err := getConfigFromProfile(c, key)
        if err == nil && value != nil {
            str = value.(string)
        }
        return str
    }

    if c.AlicloudAccessKey == "" || c.AlicloudSecretKey == "" {
        c.AlicloudAccessKey = getProviderConfig(c.AlicloudAccessKey, "access_key_id")
        c.AlicloudSecretKey = getProviderConfig(c.AlicloudSecretKey, "access_key_secret")
        c.AlicloudRegion = getProviderConfig(c.AlicloudRegion, "region_id")
        c.SecurityToken = getProviderConfig(c.SecurityToken, "sts_token")
    }

    client, err := ecs.NewClientWithStsToken(c.AlicloudRegion, c.AlicloudAccessKey, c.AlicloudSecretKey, c.SecurityToken)
    if err != nil {
        return nil, err
    }

    client.AppendUserAgent(Packer, version.AlicloudPluginVersion.FormattedVersion())
    client.SetReadTimeout(DefaultRequestReadTimeout)
    c.client = &ClientWrapper{client}

    return c.client, nil
}

func (c *AlicloudAccessConfig) Prepare(ctx *interpolate.Context) []error {
    var errs []error
    if err := c.Config(); err != nil {
        errs = append(errs, err)
    }

    if c.AlicloudRegion == "" {
        c.AlicloudRegion = os.Getenv("ALICLOUD_REGION")
    }

    if c.AlicloudRegion == "" {
        errs = append(errs, fmt.Errorf("region option or ALICLOUD_REGION must be provided in template file or environment variables."))
    }

    if len(errs) > 0 {
        return errs
    }

    return nil
}

func (c *AlicloudAccessConfig) Config() error {
    if c.AlicloudAccessKey == "" {
        c.AlicloudAccessKey = os.Getenv("ALICLOUD_ACCESS_KEY")
    }
    if c.AlicloudSecretKey == "" {
        c.AlicloudSecretKey = os.Getenv("ALICLOUD_SECRET_KEY")
    }
    if c.AlicloudProfile == "" {
        c.AlicloudProfile = os.Getenv("ALICLOUD_PROFILE")
    }
    if c.AlicloudSharedCredentialsFile == "" {
        c.AlicloudSharedCredentialsFile = os.Getenv("ALICLOUD_SHARED_CREDENTIALS_FILE")
    }
    if (c.AlicloudAccessKey == "" || c.AlicloudSecretKey == "") && c.AlicloudProfile == "" {
        return fmt.Errorf("ALICLOUD_ACCESS_KEY and ALICLOUD_SECRET_KEY must be set in template file or environment variables.")
    }
    return nil
}

func (c *AlicloudAccessConfig) ValidateRegion(region string) error {
    supportedRegions, err := c.getSupportedRegions()
    if err != nil {
        return err
    }

    for _, supportedRegion := range supportedRegions {
        if region == supportedRegion {
            return nil
        }
    }

    return fmt.Errorf("Not a valid alicloud region: %s", region)
}

func (c *AlicloudAccessConfig) getSupportedRegions() ([]string, error) {
    client, err := c.Client()
    if err != nil {
        return nil, err
    }

    regionsRequest := ecs.CreateDescribeRegionsRequest()
    regionsResponse, err := client.DescribeRegions(regionsRequest)
    if err != nil {
        return nil, err
    }

    // Allocate capacity only: appending to a pre-sized slice would leave
    // len(...) empty strings at the front of the result.
    validRegions := make([]string, 0, len(regionsResponse.Regions.Region))
    for _, valid := range regionsResponse.Regions.Region {
        validRegions = append(validRegions, valid.RegionId)
    }

    return validRegions, nil
}

func getConfigFromProfile(c *AlicloudAccessConfig, ProfileKey string) (interface{}, error) {
    providerConfig := make(map[string]interface{})
    current := c.AlicloudProfile
    if current != "" {
        profilePath, err := homedir.Expand(c.AlicloudSharedCredentialsFile)
        if err != nil {
            return nil, err
        }
        if profilePath == "" {
            profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("HOME"))
            if runtime.GOOS == "windows" {
                profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("USERPROFILE"))
            }
        }
        _, err = os.Stat(profilePath)
        if !os.IsNotExist(err) {
            data, err := ioutil.ReadFile(profilePath)
            if err != nil {
                return nil, err
            }
            config := map[string]interface{}{}
            err = json.Unmarshal(data, &config)
            if err != nil {
                return nil, err
            }
            for _, v := range config["profiles"].([]interface{}) {
                if current == v.(map[string]interface{})["name"] {
                    providerConfig = v.(map[string]interface{})
                }
            }
        }
    }
    mode := ""
    if v, ok := providerConfig["mode"]; ok {
        mode = v.(string)
    } else {
        return v, nil
    }
    switch ProfileKey {
    case "access_key_id", "access_key_secret":
        if mode == "EcsRamRole" {
            return "", nil
        }
    case "ram_role_name":
        if mode != "EcsRamRole" {
            return "", nil
        }
    case "sts_token":
        if mode != "StsToken" {
            return "", nil
        }
    case "ram_role_arn", "ram_session_name":
        if mode != "RamRoleArn" {
            return "", nil
        }
    case "expired_seconds":
        if mode != "RamRoleArn" {
            return float64(0), nil
        }
    }
    return providerConfig[ProfileKey], nil
}
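
The struct above maps the credential-related template keys; a minimal,
hypothetical HCL sketch of how they appear in a template (values are
illustrative, and the remaining required image/instance options are omitted):

```hcl
source "alicloud-ecs" "example" {
  # Either static keys...
  access_key = "my-access-key" # or the ALICLOUD_ACCESS_KEY env var
  secret_key = "my-secret-key" # or the ALICLOUD_SECRET_KEY env var

  # ...or a named profile from the shared credentials file:
  # profile                 = "default"
  # shared_credentials_file = "~/.aliyun/config.json"

  region                 = "cn-beijing" # or the ALICLOUD_REGION env var
  skip_region_validation = false
}
```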

builder/alicloud/ecs/access_config_test.go
@ -0,0 +1,52 @@
package ecs

import (
    "os"
    "testing"
)

func testAlicloudAccessConfig() *AlicloudAccessConfig {
    return &AlicloudAccessConfig{
        AlicloudAccessKey: "ak",
        AlicloudSecretKey: "acs",
    }
}

func TestAlicloudAccessConfigPrepareRegion(t *testing.T) {
    c := testAlicloudAccessConfig()

    c.AlicloudRegion = ""
    if err := c.Prepare(nil); err == nil {
        t.Fatalf("should have err")
    }

    c.AlicloudRegion = "cn-beijing"
    if err := c.Prepare(nil); err != nil {
        t.Fatalf("shouldn't have err: %s", err)
    }

    os.Setenv("ALICLOUD_REGION", "cn-hangzhou")
    c.AlicloudRegion = ""
    if err := c.Prepare(nil); err != nil {
        t.Fatalf("shouldn't have err: %s", err)
    }

    c.AlicloudAccessKey = ""
    if err := c.Prepare(nil); err == nil {
        t.Fatalf("should have err")
    }

    c.AlicloudProfile = "default"
    if err := c.Prepare(nil); err != nil {
        t.Fatalf("shouldn't have err: %s", err)
    }

    c.AlicloudProfile = ""
    os.Setenv("ALICLOUD_PROFILE", "default")
    if err := c.Prepare(nil); err != nil {
        t.Fatalf("shouldn't have err: %s", err)
    }

    c.AlicloudSkipValidation = false
}

builder/alicloud/ecs/artifact.go
@ -0,0 +1,186 @@
package ecs

import (
    "fmt"
    "log"
    "sort"
    "strings"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/packer"
)

type Artifact struct {
    // A map of regions to alicloud image IDs.
    AlicloudImages map[string]string

    // BuilderIdValue is the unique ID for the builder that created this alicloud image
    BuilderIdValue string

    // Alicloud connection for performing API calls.
    Client *ClientWrapper
}

func (a *Artifact) BuilderId() string {
    return a.BuilderIdValue
}

func (*Artifact) Files() []string {
    // We have no files
    return nil
}

func (a *Artifact) Id() string {
    parts := make([]string, 0, len(a.AlicloudImages))
    for region, ecsImageId := range a.AlicloudImages {
        parts = append(parts, fmt.Sprintf("%s:%s", region, ecsImageId))
    }

    sort.Strings(parts)
    return strings.Join(parts, ",")
}

func (a *Artifact) String() string {
    alicloudImageStrings := make([]string, 0, len(a.AlicloudImages))
    for region, id := range a.AlicloudImages {
        single := fmt.Sprintf("%s: %s", region, id)
        alicloudImageStrings = append(alicloudImageStrings, single)
    }

    sort.Strings(alicloudImageStrings)
    return fmt.Sprintf("Alicloud images were created:\n\n%s", strings.Join(alicloudImageStrings, "\n"))
}

func (a *Artifact) State(name string) interface{} {
    switch name {
    case "atlas.artifact.metadata":
        return a.stateAtlasMetadata()
    default:
        return nil
    }
}

func (a *Artifact) Destroy() error {
    errors := make([]error, 0)

    copyingImages := make(map[string]string, len(a.AlicloudImages))
    sourceImage := make(map[string]*ecs.Image, 1)
    for regionId, imageId := range a.AlicloudImages {
        describeImagesRequest := ecs.CreateDescribeImagesRequest()
        describeImagesRequest.RegionId = regionId
        describeImagesRequest.ImageId = imageId
        describeImagesRequest.Status = ImageStatusQueried
        imagesResponse, err := a.Client.DescribeImages(describeImagesRequest)
        if err != nil {
            errors = append(errors, err)
        }

        images := imagesResponse.Images.Image
        if len(images) == 0 {
            err := fmt.Errorf("Error retrieving details for alicloud image(%s), no alicloud images found", imageId)
            errors = append(errors, err)
            continue
        }

        if images[0].IsCopied && images[0].Status != ImageStatusAvailable {
            copyingImages[regionId] = imageId
        } else {
            sourceImage[regionId] = &images[0]
        }
    }

    for regionId, imageId := range copyingImages {
        log.Printf("Cancel copying alicloud image (%s) from region (%s)", imageId, regionId)

        errs := a.unsharedAccountsOnImages(regionId, imageId)
        if errs != nil {
            errors = append(errors, errs...)
        }

        cancelImageCopyRequest := ecs.CreateCancelCopyImageRequest()
        cancelImageCopyRequest.RegionId = regionId
        cancelImageCopyRequest.ImageId = imageId
        if _, err := a.Client.CancelCopyImage(cancelImageCopyRequest); err != nil {
            errors = append(errors, err)
        }
    }

    for regionId, image := range sourceImage {
        imageId := image.ImageId
        log.Printf("Delete alicloud image (%s) from region (%s)", imageId, regionId)

        errs := a.unsharedAccountsOnImages(regionId, imageId)
        if errs != nil {
            errors = append(errors, errs...)
        }

        deleteImageRequest := ecs.CreateDeleteImageRequest()
        deleteImageRequest.RegionId = regionId
        deleteImageRequest.ImageId = imageId
        if _, err := a.Client.DeleteImage(deleteImageRequest); err != nil {
            errors = append(errors, err)
        }

        // Delete the snapshots of this image
        for _, diskDevices := range image.DiskDeviceMappings.DiskDeviceMapping {
            deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
            deleteSnapshotRequest.SnapshotId = diskDevices.SnapshotId
            _, err := a.Client.DeleteSnapshot(deleteSnapshotRequest)
            if err != nil {
                errors = append(errors, err)
            }
        }
    }

    if len(errors) > 0 {
        if len(errors) == 1 {
            return errors[0]
        } else {
            return &packer.MultiError{Errors: errors}
        }
    }

    return nil
}

func (a *Artifact) unsharedAccountsOnImages(regionId string, imageId string) []error {
    var errors []error

    describeImageShareRequest := ecs.CreateDescribeImageSharePermissionRequest()
    describeImageShareRequest.RegionId = regionId
    describeImageShareRequest.ImageId = imageId
    imageShareResponse, err := a.Client.DescribeImageSharePermission(describeImageShareRequest)
    if err != nil {
        errors = append(errors, err)
        return errors
    }

    accountsNumber := len(imageShareResponse.Accounts.Account)
    if accountsNumber > 0 {
        accounts := make([]string, accountsNumber)
        for index, account := range imageShareResponse.Accounts.Account {
            accounts[index] = account.AliyunId
        }

        modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
        modifyImageShareRequest.RegionId = regionId
        modifyImageShareRequest.ImageId = imageId
        modifyImageShareRequest.RemoveAccount = &accounts
        _, err := a.Client.ModifyImageSharePermission(modifyImageShareRequest)
        if err != nil {
            errors = append(errors, err)
        }
    }

    return errors
}

func (a *Artifact) stateAtlasMetadata() interface{} {
    metadata := make(map[string]string)
    for region, imageId := range a.AlicloudImages {
        k := fmt.Sprintf("region.%s", region)
        metadata[k] = imageId
    }

    return metadata
}

builder/alicloud/ecs/artifact_test.go
@ -0,0 +1,47 @@
package ecs

import (
    "reflect"
    "testing"

    "github.com/hashicorp/packer/packer"
)

func TestArtifact_Impl(t *testing.T) {
    var _ packer.Artifact = new(Artifact)
}

func TestArtifactId(t *testing.T) {
    expected := `east:foo,west:bar`

    ecsImages := make(map[string]string)
    ecsImages["east"] = "foo"
    ecsImages["west"] = "bar"

    a := &Artifact{
        AlicloudImages: ecsImages,
    }

    result := a.Id()
    if result != expected {
        t.Fatalf("bad: %s", result)
    }
}

func TestArtifactState_atlasMetadata(t *testing.T) {
    a := &Artifact{
        AlicloudImages: map[string]string{
            "east": "foo",
            "west": "bar",
        },
    }

    actual := a.State("atlas.artifact.metadata")
    expected := map[string]string{
        "region.east": "foo",
        "region.west": "bar",
    }
    if !reflect.DeepEqual(actual, expected) {
        t.Fatalf("bad: %#v", actual)
    }
}

builder/alicloud/ecs/builder.go
@ -0,0 +1,269 @@
|
||||
//go:generate mapstructure-to-hcl2 -type Config,AlicloudDiskDevice
|
||||
|
||||
// The alicloud contains a packer.Builder implementation that
|
||||
// builds ecs images for alicloud.
|
||||
package ecs
|
||||
|
||||
import (
    "context"
    "fmt"

    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer/helper/communicator"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/common"
    "github.com/hashicorp/packer/packer-plugin-sdk/commonsteps"
    "github.com/hashicorp/packer/packer-plugin-sdk/config"
    "github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
)

// The unique ID for this builder
const BuilderId = "alibaba.alicloud"

type Config struct {
    common.PackerConfig  `mapstructure:",squash"`
    AlicloudAccessConfig `mapstructure:",squash"`
    AlicloudImageConfig  `mapstructure:",squash"`
    RunConfig            `mapstructure:",squash"`

    ctx interpolate.Context
}

type Builder struct {
    config Config
    runner multistep.Runner
}

type InstanceNetWork string

const (
    ALICLOUD_DEFAULT_SHORT_TIMEOUT = 180
    ALICLOUD_DEFAULT_TIMEOUT       = 1800
    ALICLOUD_DEFAULT_LONG_TIMEOUT  = 3600
)

func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }

func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
    err := config.Decode(&b.config, &config.DecodeOpts{
        PluginType:         BuilderId,
        Interpolate:        true,
        InterpolateContext: &b.config.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{
                "run_command",
            },
        },
    }, raws...)
    b.config.ctx.EnableEnv = true
    if err != nil {
        return nil, nil, err
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AlicloudImageForceDelete = true
        b.config.AlicloudImageForceDeleteSnapshots = true
    }

    // Accumulate any errors
    var errs *packer.MultiError
    errs = packer.MultiErrorAppend(errs, b.config.AlicloudAccessConfig.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs, b.config.AlicloudImageConfig.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)

    if errs != nil && len(errs.Errors) > 0 {
        return nil, nil, errs
    }

    packer.LogSecretFilter.Set(b.config.AlicloudAccessKey, b.config.AlicloudSecretKey)
    return nil, nil, nil
}

func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {

    client, err := b.config.Client()
    if err != nil {
        return nil, err
    }
    state := new(multistep.BasicStateBag)
    state.Put("config", &b.config)
    state.Put("client", client)
    state.Put("hook", hook)
    state.Put("ui", ui)
    state.Put("networktype", b.chooseNetworkType())
    var steps []multistep.Step

    // Build the steps
    steps = []multistep.Step{
        &stepPreValidate{
            AlicloudDestImageName: b.config.AlicloudImageName,
            ForceDelete:           b.config.AlicloudImageForceDelete,
        },
        &stepCheckAlicloudSourceImage{
            SourceECSImageId: b.config.AlicloudSourceImage,
        },
        &stepConfigAlicloudKeyPair{
            Debug:        b.config.PackerDebug,
            Comm:         &b.config.Comm,
            DebugKeyPath: fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
            RegionId:     b.config.AlicloudRegion,
        },
    }
    if b.chooseNetworkType() == InstanceNetworkVpc {
        steps = append(steps,
            &stepConfigAlicloudVPC{
                VpcId:     b.config.VpcId,
                CidrBlock: b.config.CidrBlock,
                VpcName:   b.config.VpcName,
            },
            &stepConfigAlicloudVSwitch{
                VSwitchId:   b.config.VSwitchId,
                ZoneId:      b.config.ZoneId,
                CidrBlock:   b.config.CidrBlock,
                VSwitchName: b.config.VSwitchName,
            })
    }
    steps = append(steps,
        &stepConfigAlicloudSecurityGroup{
            SecurityGroupId:   b.config.SecurityGroupId,
            SecurityGroupName: b.config.SecurityGroupName,
            RegionId:          b.config.AlicloudRegion,
        },
        &stepCreateAlicloudInstance{
            IOOptimized:             b.config.IOOptimized,
            InstanceType:            b.config.InstanceType,
            UserData:                b.config.UserData,
            UserDataFile:            b.config.UserDataFile,
            RegionId:                b.config.AlicloudRegion,
            InternetChargeType:      b.config.InternetChargeType,
            InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
            InstanceName:            b.config.InstanceName,
            ZoneId:                  b.config.ZoneId,
        })
    if b.chooseNetworkType() == InstanceNetworkVpc {
        steps = append(steps, &stepConfigAlicloudEIP{
            AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
            RegionId:                 b.config.AlicloudRegion,
            InternetChargeType:       b.config.InternetChargeType,
            InternetMaxBandwidthOut:  b.config.InternetMaxBandwidthOut,
            SSHPrivateIp:             b.config.SSHPrivateIp,
        })
    } else {
        steps = append(steps, &stepConfigAlicloudPublicIP{
            RegionId:     b.config.AlicloudRegion,
            SSHPrivateIp: b.config.SSHPrivateIp,
        })
    }
    steps = append(steps,
        &stepAttachKeyPair{},
        &stepRunAlicloudInstance{},
        &communicator.StepConnect{
            Config: &b.config.RunConfig.Comm,
            Host: SSHHost(
                client,
                b.config.SSHPrivateIp),
            SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
        },
        &commonsteps.StepProvision{},
        &commonsteps.StepCleanupTempKeys{
            Comm: &b.config.RunConfig.Comm,
        },
        &stepStopAlicloudInstance{
            ForceStop:   b.config.ForceStopInstance,
            DisableStop: b.config.DisableStopInstance,
        },
        &stepDeleteAlicloudImageSnapshots{
            AlicloudImageForceDeleteSnapshots: b.config.AlicloudImageForceDeleteSnapshots,
            AlicloudImageForceDelete:          b.config.AlicloudImageForceDelete,
            AlicloudImageName:                 b.config.AlicloudImageName,
            AlicloudImageDestinationRegions:   b.config.AlicloudImageConfig.AlicloudImageDestinationRegions,
            AlicloudImageDestinationNames:     b.config.AlicloudImageConfig.AlicloudImageDestinationNames,
        })

    if b.config.AlicloudImageIgnoreDataDisks {
        steps = append(steps, &stepCreateAlicloudSnapshot{
            WaitSnapshotReadyTimeout: b.getSnapshotReadyTimeout(),
        })
    }

    steps = append(steps,
        &stepCreateAlicloudImage{
            AlicloudImageIgnoreDataDisks: b.config.AlicloudImageIgnoreDataDisks,
            WaitSnapshotReadyTimeout:     b.getSnapshotReadyTimeout(),
        },
        &stepCreateTags{
            Tags: b.config.AlicloudImageTags,
        },
        &stepRegionCopyAlicloudImage{
            AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions,
            AlicloudImageDestinationNames:   b.config.AlicloudImageDestinationNames,
            RegionId:                        b.config.AlicloudRegion,
        },
        &stepShareAlicloudImage{
            AlicloudImageShareAccounts:   b.config.AlicloudImageShareAccounts,
            AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
            RegionId:                     b.config.AlicloudRegion,
        })

    // Run!
    b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
    b.runner.Run(ctx, state)

    // If there was an error, return that
    if rawErr, ok := state.GetOk("error"); ok {
        return nil, rawErr.(error)
    }

    // If there are no ECS images, then just return
    if _, ok := state.GetOk("alicloudimages"); !ok {
        return nil, nil
    }

    // Build the artifact and return it
    artifact := &Artifact{
        AlicloudImages: state.Get("alicloudimages").(map[string]string),
        BuilderIdValue: BuilderId,
        Client:         client,
    }

    return artifact, nil
}

func (b *Builder) chooseNetworkType() InstanceNetWork {
    if b.isVpcNetRequired() {
        return InstanceNetworkVpc
    } else {
        return InstanceNetworkClassic
    }
}

func (b *Builder) isVpcNetRequired() bool {
    // UserData and KeyPair only work in VPC
    return b.isVpcSpecified() || b.isUserDataNeeded() || b.isKeyPairNeeded()
}

func (b *Builder) isVpcSpecified() bool {
    return b.config.VpcId != "" || b.config.VSwitchId != ""
}

func (b *Builder) isUserDataNeeded() bool {
    // Public key setup requires userdata
    if b.config.RunConfig.Comm.SSHPrivateKeyFile != "" {
        return true
    }

    return b.config.UserData != "" || b.config.UserDataFile != ""
}

func (b *Builder) isKeyPairNeeded() bool {
    return b.config.Comm.SSHKeyPairName != "" || b.config.Comm.SSHTemporaryKeyPairName != ""
}

func (b *Builder) getSnapshotReadyTimeout() int {
    if b.config.WaitSnapshotReadyTimeout > 0 {
        return b.config.WaitSnapshotReadyTimeout
    }

    return ALICLOUD_DEFAULT_LONG_TIMEOUT
}
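A usage note: this diff never shows how the Builder is served as a plugin binary. A minimal sketch of a plugin main, assuming the classic packer/plugin server API still present on this branch (the main package itself is hypothetical, not part of this change):

package main

import (
    "log"

    "github.com/hashicorp/packer/builder/alicloud/ecs"
    "github.com/hashicorp/packer/packer/plugin"
)

func main() {
    // Serve the alicloud ECS builder as a standalone Packer plugin binary.
    server, err := plugin.Server()
    if err != nil {
        log.Fatal(err)
    }
    server.RegisterBuilder(new(ecs.Builder))
    server.Serve()
}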
272 builder/alicloud/ecs/builder.hcl2spec.go Normal file
@ -0,0 +1,272 @@
// Code generated by "mapstructure-to-hcl2 -type Config,AlicloudDiskDevice"; DO NOT EDIT.
package ecs

import (
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer/hcl2template"
    "github.com/zclconf/go-cty/cty"
)

// FlatAlicloudDiskDevice is an auto-generated flat version of AlicloudDiskDevice.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatAlicloudDiskDevice struct {
    DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name" hcl:"disk_name"`
    DiskCategory *string `mapstructure:"disk_category" required:"false" cty:"disk_category" hcl:"disk_category"`
    DiskSize *int `mapstructure:"disk_size" required:"false" cty:"disk_size" hcl:"disk_size"`
    SnapshotId *string `mapstructure:"disk_snapshot_id" required:"false" cty:"disk_snapshot_id" hcl:"disk_snapshot_id"`
    Description *string `mapstructure:"disk_description" required:"false" cty:"disk_description" hcl:"disk_description"`
    DeleteWithInstance *bool `mapstructure:"disk_delete_with_instance" required:"false" cty:"disk_delete_with_instance" hcl:"disk_delete_with_instance"`
    Device *string `mapstructure:"disk_device" required:"false" cty:"disk_device" hcl:"disk_device"`
    Encrypted *bool `mapstructure:"disk_encrypted" required:"false" cty:"disk_encrypted" hcl:"disk_encrypted"`
}

// FlatMapstructure returns a new FlatAlicloudDiskDevice.
// FlatAlicloudDiskDevice is an auto-generated flat version of AlicloudDiskDevice.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
func (*AlicloudDiskDevice) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
    return new(FlatAlicloudDiskDevice)
}

// HCL2Spec returns the hcl spec of a AlicloudDiskDevice.
// This spec is used by HCL to read the fields of AlicloudDiskDevice.
// The decoded values from this spec will then be applied to a FlatAlicloudDiskDevice.
func (*FlatAlicloudDiskDevice) HCL2Spec() map[string]hcldec.Spec {
    s := map[string]hcldec.Spec{
        "disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false},
        "disk_category": &hcldec.AttrSpec{Name: "disk_category", Type: cty.String, Required: false},
        "disk_size": &hcldec.AttrSpec{Name: "disk_size", Type: cty.Number, Required: false},
        "disk_snapshot_id": &hcldec.AttrSpec{Name: "disk_snapshot_id", Type: cty.String, Required: false},
        "disk_description": &hcldec.AttrSpec{Name: "disk_description", Type: cty.String, Required: false},
        "disk_delete_with_instance": &hcldec.AttrSpec{Name: "disk_delete_with_instance", Type: cty.Bool, Required: false},
        "disk_device": &hcldec.AttrSpec{Name: "disk_device", Type: cty.String, Required: false},
        "disk_encrypted": &hcldec.AttrSpec{Name: "disk_encrypted", Type: cty.Bool, Required: false},
    }
    return s
}

// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatConfig struct {
    PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
    PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
    PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
    PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
    PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
    PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
    PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
    PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
    AlicloudAccessKey *string `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"`
    AlicloudSecretKey *string `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"`
    AlicloudRegion *string `mapstructure:"region" required:"true" cty:"region" hcl:"region"`
    AlicloudSkipValidation *bool `mapstructure:"skip_region_validation" required:"false" cty:"skip_region_validation" hcl:"skip_region_validation"`
    AlicloudSkipImageValidation *bool `mapstructure:"skip_image_validation" required:"false" cty:"skip_image_validation" hcl:"skip_image_validation"`
    AlicloudProfile *string `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"`
    AlicloudSharedCredentialsFile *string `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"`
    SecurityToken *string `mapstructure:"security_token" required:"false" cty:"security_token" hcl:"security_token"`
    AlicloudImageName *string `mapstructure:"image_name" required:"true" cty:"image_name" hcl:"image_name"`
    AlicloudImageVersion *string `mapstructure:"image_version" required:"false" cty:"image_version" hcl:"image_version"`
    AlicloudImageDescription *string `mapstructure:"image_description" required:"false" cty:"image_description" hcl:"image_description"`
    AlicloudImageShareAccounts []string `mapstructure:"image_share_account" required:"false" cty:"image_share_account" hcl:"image_share_account"`
    AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account" cty:"image_unshare_account" hcl:"image_unshare_account"`
    AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false" cty:"image_copy_regions" hcl:"image_copy_regions"`
    AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false" cty:"image_copy_names" hcl:"image_copy_names"`
    ImageEncrypted *bool `mapstructure:"image_encrypted" required:"false" cty:"image_encrypted" hcl:"image_encrypted"`
    AlicloudImageForceDelete *bool `mapstructure:"image_force_delete" required:"false" cty:"image_force_delete" hcl:"image_force_delete"`
    AlicloudImageForceDeleteSnapshots *bool `mapstructure:"image_force_delete_snapshots" required:"false" cty:"image_force_delete_snapshots" hcl:"image_force_delete_snapshots"`
    AlicloudImageForceDeleteInstances *bool `mapstructure:"image_force_delete_instances" cty:"image_force_delete_instances" hcl:"image_force_delete_instances"`
    AlicloudImageIgnoreDataDisks *bool `mapstructure:"image_ignore_data_disks" required:"false" cty:"image_ignore_data_disks" hcl:"image_ignore_data_disks"`
    AlicloudImageTags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
    AlicloudImageTag []hcl2template.FlatKeyValue `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
    ECSSystemDiskMapping *FlatAlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false" cty:"system_disk_mapping" hcl:"system_disk_mapping"`
    ECSImagesDiskMappings []FlatAlicloudDiskDevice `mapstructure:"image_disk_mappings" required:"false" cty:"image_disk_mappings" hcl:"image_disk_mappings"`
    AssociatePublicIpAddress *bool `mapstructure:"associate_public_ip_address" cty:"associate_public_ip_address" hcl:"associate_public_ip_address"`
    ZoneId *string `mapstructure:"zone_id" required:"false" cty:"zone_id" hcl:"zone_id"`
    IOOptimized *bool `mapstructure:"io_optimized" required:"false" cty:"io_optimized" hcl:"io_optimized"`
    InstanceType *string `mapstructure:"instance_type" required:"true" cty:"instance_type" hcl:"instance_type"`
    Description *string `mapstructure:"description" cty:"description" hcl:"description"`
    AlicloudSourceImage *string `mapstructure:"source_image" required:"true" cty:"source_image" hcl:"source_image"`
    ForceStopInstance *bool `mapstructure:"force_stop_instance" required:"false" cty:"force_stop_instance" hcl:"force_stop_instance"`
    DisableStopInstance *bool `mapstructure:"disable_stop_instance" required:"false" cty:"disable_stop_instance" hcl:"disable_stop_instance"`
    SecurityGroupId *string `mapstructure:"security_group_id" required:"false" cty:"security_group_id" hcl:"security_group_id"`
    SecurityGroupName *string `mapstructure:"security_group_name" required:"false" cty:"security_group_name" hcl:"security_group_name"`
    UserData *string `mapstructure:"user_data" required:"false" cty:"user_data" hcl:"user_data"`
    UserDataFile *string `mapstructure:"user_data_file" required:"false" cty:"user_data_file" hcl:"user_data_file"`
    VpcId *string `mapstructure:"vpc_id" required:"false" cty:"vpc_id" hcl:"vpc_id"`
    VpcName *string `mapstructure:"vpc_name" required:"false" cty:"vpc_name" hcl:"vpc_name"`
    CidrBlock *string `mapstructure:"vpc_cidr_block" required:"false" cty:"vpc_cidr_block" hcl:"vpc_cidr_block"`
    VSwitchId *string `mapstructure:"vswitch_id" required:"false" cty:"vswitch_id" hcl:"vswitch_id"`
    VSwitchName *string `mapstructure:"vswitch_name" required:"false" cty:"vswitch_name" hcl:"vswitch_name"`
    InstanceName *string `mapstructure:"instance_name" required:"false" cty:"instance_name" hcl:"instance_name"`
    InternetChargeType *string `mapstructure:"internet_charge_type" required:"false" cty:"internet_charge_type" hcl:"internet_charge_type"`
    InternetMaxBandwidthOut *int `mapstructure:"internet_max_bandwidth_out" required:"false" cty:"internet_max_bandwidth_out" hcl:"internet_max_bandwidth_out"`
    WaitSnapshotReadyTimeout *int `mapstructure:"wait_snapshot_ready_timeout" required:"false" cty:"wait_snapshot_ready_timeout" hcl:"wait_snapshot_ready_timeout"`
    Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
    PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
    SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
    SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
    SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
    SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
    SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
    SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
    SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
    SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
    SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
    SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
    SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
    SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
    SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
    SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
    SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
    SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
    SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
    SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
    SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
    SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
    SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
    SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
    SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
    SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
    SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
    SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
    SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
    SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
    SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
    SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
    SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
    SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
    SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
    SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
    SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
    SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
    SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
    SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
    WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
    WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
    WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
    WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
    WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
    WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
    WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
    WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
    WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
    SSHPrivateIp *bool `mapstructure:"ssh_private_ip" required:"false" cty:"ssh_private_ip" hcl:"ssh_private_ip"`
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
    return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
    s := map[string]hcldec.Spec{
        "packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
        "packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
        "packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
        "packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
        "packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
        "packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
        "packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
        "packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
        "access_key": &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false},
        "secret_key": &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false},
        "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
        "skip_region_validation": &hcldec.AttrSpec{Name: "skip_region_validation", Type: cty.Bool, Required: false},
        "skip_image_validation": &hcldec.AttrSpec{Name: "skip_image_validation", Type: cty.Bool, Required: false},
        "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false},
        "shared_credentials_file": &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false},
        "security_token": &hcldec.AttrSpec{Name: "security_token", Type: cty.String, Required: false},
        "image_name": &hcldec.AttrSpec{Name: "image_name", Type: cty.String, Required: false},
        "image_version": &hcldec.AttrSpec{Name: "image_version", Type: cty.String, Required: false},
        "image_description": &hcldec.AttrSpec{Name: "image_description", Type: cty.String, Required: false},
        "image_share_account": &hcldec.AttrSpec{Name: "image_share_account", Type: cty.List(cty.String), Required: false},
        "image_unshare_account": &hcldec.AttrSpec{Name: "image_unshare_account", Type: cty.List(cty.String), Required: false},
        "image_copy_regions": &hcldec.AttrSpec{Name: "image_copy_regions", Type: cty.List(cty.String), Required: false},
        "image_copy_names": &hcldec.AttrSpec{Name: "image_copy_names", Type: cty.List(cty.String), Required: false},
        "image_encrypted": &hcldec.AttrSpec{Name: "image_encrypted", Type: cty.Bool, Required: false},
        "image_force_delete": &hcldec.AttrSpec{Name: "image_force_delete", Type: cty.Bool, Required: false},
        "image_force_delete_snapshots": &hcldec.AttrSpec{Name: "image_force_delete_snapshots", Type: cty.Bool, Required: false},
        "image_force_delete_instances": &hcldec.AttrSpec{Name: "image_force_delete_instances", Type: cty.Bool, Required: false},
        "image_ignore_data_disks": &hcldec.AttrSpec{Name: "image_ignore_data_disks", Type: cty.Bool, Required: false},
        "tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
        "tag": &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*hcl2template.FlatKeyValue)(nil).HCL2Spec())},
        "system_disk_mapping": &hcldec.BlockSpec{TypeName: "system_disk_mapping", Nested: hcldec.ObjectSpec((*FlatAlicloudDiskDevice)(nil).HCL2Spec())},
        "image_disk_mappings": &hcldec.BlockListSpec{TypeName: "image_disk_mappings", Nested: hcldec.ObjectSpec((*FlatAlicloudDiskDevice)(nil).HCL2Spec())},
        "associate_public_ip_address": &hcldec.AttrSpec{Name: "associate_public_ip_address", Type: cty.Bool, Required: false},
        "zone_id": &hcldec.AttrSpec{Name: "zone_id", Type: cty.String, Required: false},
        "io_optimized": &hcldec.AttrSpec{Name: "io_optimized", Type: cty.Bool, Required: false},
        "instance_type": &hcldec.AttrSpec{Name: "instance_type", Type: cty.String, Required: false},
        "description": &hcldec.AttrSpec{Name: "description", Type: cty.String, Required: false},
        "source_image": &hcldec.AttrSpec{Name: "source_image", Type: cty.String, Required: false},
        "force_stop_instance": &hcldec.AttrSpec{Name: "force_stop_instance", Type: cty.Bool, Required: false},
        "disable_stop_instance": &hcldec.AttrSpec{Name: "disable_stop_instance", Type: cty.Bool, Required: false},
        "security_group_id": &hcldec.AttrSpec{Name: "security_group_id", Type: cty.String, Required: false},
        "security_group_name": &hcldec.AttrSpec{Name: "security_group_name", Type: cty.String, Required: false},
        "user_data": &hcldec.AttrSpec{Name: "user_data", Type: cty.String, Required: false},
        "user_data_file": &hcldec.AttrSpec{Name: "user_data_file", Type: cty.String, Required: false},
        "vpc_id": &hcldec.AttrSpec{Name: "vpc_id", Type: cty.String, Required: false},
        "vpc_name": &hcldec.AttrSpec{Name: "vpc_name", Type: cty.String, Required: false},
        "vpc_cidr_block": &hcldec.AttrSpec{Name: "vpc_cidr_block", Type: cty.String, Required: false},
        "vswitch_id": &hcldec.AttrSpec{Name: "vswitch_id", Type: cty.String, Required: false},
        "vswitch_name": &hcldec.AttrSpec{Name: "vswitch_name", Type: cty.String, Required: false},
        "instance_name": &hcldec.AttrSpec{Name: "instance_name", Type: cty.String, Required: false},
        "internet_charge_type": &hcldec.AttrSpec{Name: "internet_charge_type", Type: cty.String, Required: false},
        "internet_max_bandwidth_out": &hcldec.AttrSpec{Name: "internet_max_bandwidth_out", Type: cty.Number, Required: false},
        "wait_snapshot_ready_timeout": &hcldec.AttrSpec{Name: "wait_snapshot_ready_timeout", Type: cty.Number, Required: false},
        "communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
        "pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
        "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
        "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
        "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
        "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
        "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
        "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
        "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
        "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
        "ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
        "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
        "ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
        "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
        "ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
        "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
        "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
        "ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
        "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
        "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
        "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
        "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
        "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
        "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
        "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
        "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
        "ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
        "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
        "ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
        "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
        "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
        "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
        "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
        "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
        "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
        "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
        "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
        "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
        "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
        "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
        "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
        "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
        "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
        "winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
        "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
        "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
        "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
        "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
        "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
        "ssh_private_ip": &hcldec.AttrSpec{Name: "ssh_private_ip", Type: cty.Bool, Required: false},
    }
    return s
}
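To see what the generated spec buys you: Packer's HCL2 frontend asks the builder for ConfigSpec() and hands the result to hcldec. A minimal, self-contained sketch of that decode path (illustrative only; the real call sites live in Packer's core, and the attribute values are made up):

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/hcl/v2/hclsyntax"

    "github.com/hashicorp/packer/builder/alicloud/ecs"
)

func main() {
    src := []byte(`
region        = "cn-beijing"
instance_type = "ecs.n1.tiny"
source_image  = "ubuntu_18_04_64_20G_alibase_20190509.vhd"
image_name    = "packer-hcl2-demo"
`)
    file, diags := hclsyntax.ParseConfig(src, "demo.pkr.hcl", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags)
    }

    // ConfigSpec() returns the hcldec.ObjectSpec built from FlatConfig above.
    b := &ecs.Builder{}
    val, diags := hcldec.Decode(file.Body, b.ConfigSpec(), nil)
    if diags.HasErrors() {
        panic(diags)
    }
    fmt.Println(val.GetAttr("image_name").AsString())
}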
891 builder/alicloud/ecs/builder_acc_test.go Normal file
@ -0,0 +1,891 @@
package ecs

import (
    "encoding/json"
    "fmt"
    "os"
    "strings"
    "testing"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    builderT "github.com/hashicorp/packer/helper/builder/testing"
    "github.com/hashicorp/packer/packer"
)

const defaultTestRegion = "cn-beijing"

func TestBuilderAcc_validateRegion(t *testing.T) {
    t.Parallel()

    if os.Getenv(builderT.TestEnvVar) == "" {
        t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", builderT.TestEnvVar))
        return
    }

    testAccPreCheck(t)

    access := &AlicloudAccessConfig{AlicloudRegion: "cn-beijing"}
    err := access.Config()
    if err != nil {
        t.Fatalf("init AlicloudAccessConfig failed: %s", err)
    }

    err = access.ValidateRegion("cn-hangzhou")
    if err != nil {
        t.Fatalf("Expect pass with valid region id but failed: %s", err)
    }

    err = access.ValidateRegion("invalidRegionId")
    if err == nil {
        t.Fatal("Expect failure due to invalid region id but passed")
    }
}

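testAccPreCheck is invoked by every test in this file but is defined elsewhere in the package. A representative sketch, under the assumption (borrowed from sibling Packer builders) that it only asserts the credential environment variables are present; the variable names here are assumptions, not taken from this diff:

func testAccPreCheck(t *testing.T) {
    // Hypothetical sketch; the real helper lives elsewhere in this package.
    if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" {
        t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests")
    }
    if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" {
        t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests")
    }
}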
func TestBuilderAcc_basic(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccBasic,
    })
}

const testBuilderAccBasic = `
{ "builders": [{
    "type": "test",
    "region": "cn-beijing",
    "instance_type": "ecs.n1.tiny",
    "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
    "io_optimized":"true",
    "ssh_username":"root",
    "image_name": "packer-test-basic_{{timestamp}}"
    }]
}`

func TestBuilderAcc_withDiskSettings(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccWithDiskSettings,
        Check:    checkImageDisksSettings(),
    })
}

const testBuilderAccWithDiskSettings = `
{ "builders": [{
    "type": "test",
    "region": "cn-beijing",
    "instance_type": "ecs.n1.tiny",
    "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
    "io_optimized":"true",
    "ssh_username":"root",
    "image_name": "packer-test-withDiskSettings_{{timestamp}}",
    "system_disk_mapping": {
        "disk_size": 60
    },
    "image_disk_mappings": [
        {
            "disk_name": "datadisk1",
            "disk_size": 25,
            "disk_delete_with_instance": true
        },
        {
            "disk_name": "datadisk2",
            "disk_size": 25,
            "disk_delete_with_instance": true
        }
    ]
    }]
}`

func checkImageDisksSettings() builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        if len(artifacts) > 1 {
            return fmt.Errorf("more than 1 artifact")
        }

        // Get the actual *Artifact pointer so we can access the AMIs directly
        artifactRaw := artifacts[0]
        artifact, ok := artifactRaw.(*Artifact)
        if !ok {
            return fmt.Errorf("unknown artifact: %#v", artifactRaw)
        }
        imageId := artifact.AlicloudImages[defaultTestRegion]

        // describe the image, get block devices with a snapshot
        client, _ := testAliyunClient()

        describeImagesRequest := ecs.CreateDescribeImagesRequest()
        describeImagesRequest.RegionId = defaultTestRegion
        describeImagesRequest.ImageId = imageId
        imagesResponse, err := client.DescribeImages(describeImagesRequest)
        if err != nil {
            return fmt.Errorf("describe images failed due to %s", err)
        }

        if len(imagesResponse.Images.Image) == 0 {
            return fmt.Errorf("generated image %s cannot be found", imageId)
        }

        image := imagesResponse.Images.Image[0]
        if image.Size != 60 {
            return fmt.Errorf("the size of image %s should be equal to 60G but got %dG", imageId, image.Size)
        }
        if len(image.DiskDeviceMappings.DiskDeviceMapping) != 3 {
            return fmt.Errorf("image %s should contain 3 disks", imageId)
        }

        var snapshotIds []string
        for _, mapping := range image.DiskDeviceMappings.DiskDeviceMapping {
            if mapping.Type == DiskTypeSystem {
                if mapping.Size != "60" {
                    return fmt.Errorf("the system snapshot size of image %s should be equal to 60G but got %sG", imageId, mapping.Size)
                }
            } else {
                if mapping.Size != "25" {
                    return fmt.Errorf("the data disk size of image %s should be equal to 25G but got %sG", imageId, mapping.Size)
                }

                snapshotIds = append(snapshotIds, mapping.SnapshotId)
            }
        }

        data, _ := json.Marshal(snapshotIds)

        describeSnapshotRequest := ecs.CreateDescribeSnapshotsRequest()
        describeSnapshotRequest.RegionId = defaultTestRegion
        describeSnapshotRequest.SnapshotIds = string(data)
        describeSnapshotsResponse, err := client.DescribeSnapshots(describeSnapshotRequest)
        if err != nil {
            return fmt.Errorf("describe data snapshots failed due to %s", err)
        }
        if len(describeSnapshotsResponse.Snapshots.Snapshot) != len(snapshotIds) {
            return fmt.Errorf("expect %d data snapshots but got %d", len(snapshotIds), len(describeSnapshotsResponse.Snapshots.Snapshot))
        }

        var dataDiskIds []string
        for _, snapshot := range describeSnapshotsResponse.Snapshots.Snapshot {
            dataDiskIds = append(dataDiskIds, snapshot.SourceDiskId)
        }
        data, _ = json.Marshal(dataDiskIds)

        describeDisksRequest := ecs.CreateDescribeDisksRequest()
        describeDisksRequest.RegionId = defaultTestRegion
        describeDisksRequest.DiskIds = string(data)
        describeDisksResponse, err := client.DescribeDisks(describeDisksRequest)
        if err != nil {
            return fmt.Errorf("describe snapshots failed due to %s", err)
        }
        if len(describeDisksResponse.Disks.Disk) != 0 {
            return fmt.Errorf("data disks should be deleted but %d left", len(describeDisksResponse.Disks.Disk))
        }

        return nil
    }
}

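One API detail worth calling out from the check above: DescribeSnapshotsRequest.SnapshotIds (like DiskIds on DescribeDisksRequest) expects a JSON-encoded array passed as a plain string, which is why the IDs go through json.Marshal first. In isolation (the IDs here are made up):

// The ECS API wants the string `["s-aaa","s-bbb"]`, not "s-aaa,s-bbb".
ids := []string{"s-aaa", "s-bbb"} // hypothetical snapshot IDs
data, err := json.Marshal(ids)
if err != nil {
    panic(err)
}
req := ecs.CreateDescribeSnapshotsRequest()
req.SnapshotIds = string(data)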
func TestBuilderAcc_withIgnoreDataDisks(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccIgnoreDataDisks,
        Check:    checkIgnoreDataDisks(),
    })
}

const testBuilderAccIgnoreDataDisks = `
{ "builders": [{
    "type": "test",
    "region": "cn-beijing",
    "instance_type": "ecs.gn5-c8g1.2xlarge",
    "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
    "io_optimized":"true",
    "ssh_username":"root",
    "image_name": "packer-test-ignoreDataDisks_{{timestamp}}",
    "image_ignore_data_disks": true
    }]
}`

func checkIgnoreDataDisks() builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        if len(artifacts) > 1 {
            return fmt.Errorf("more than 1 artifact")
        }

        // Get the actual *Artifact pointer so we can access the AMIs directly
        artifactRaw := artifacts[0]
        artifact, ok := artifactRaw.(*Artifact)
        if !ok {
            return fmt.Errorf("unknown artifact: %#v", artifactRaw)
        }
        imageId := artifact.AlicloudImages[defaultTestRegion]

        // describe the image, get block devices with a snapshot
        client, _ := testAliyunClient()

        describeImagesRequest := ecs.CreateDescribeImagesRequest()
        describeImagesRequest.RegionId = defaultTestRegion
        describeImagesRequest.ImageId = imageId
        imagesResponse, err := client.DescribeImages(describeImagesRequest)
        if err != nil {
            return fmt.Errorf("describe images failed due to %s", err)
        }

        if len(imagesResponse.Images.Image) == 0 {
            return fmt.Errorf("generated image %s cannot be found", imageId)
        }

        image := imagesResponse.Images.Image[0]
        if len(image.DiskDeviceMappings.DiskDeviceMapping) != 1 {
            return fmt.Errorf("image %s should only contain one disk", imageId)
        }

        return nil
    }
}

func TestBuilderAcc_windows(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccWindows,
    })
}

const testBuilderAccWindows = `
{ "builders": [{
    "type": "test",
    "region": "cn-beijing",
    "instance_type": "ecs.n1.tiny",
    "source_image":"winsvr_64_dtcC_1809_en-us_40G_alibase_20190318.vhd",
    "io_optimized":"true",
    "communicator": "winrm",
    "winrm_port": 5985,
    "winrm_username": "Administrator",
    "winrm_password": "Test1234",
    "image_name": "packer-test-windows_{{timestamp}}",
    "user_data_file": "../../../examples/alicloud/basic/winrm_enable_userdata.ps1"
    }]
}`

func TestBuilderAcc_regionCopy(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccRegionCopy,
        Check:    checkRegionCopy([]string{"cn-hangzhou", "cn-shenzhen"}),
    })
}

const testBuilderAccRegionCopy = `
{
    "builders": [{
        "type": "test",
        "region": "cn-beijing",
        "instance_type": "ecs.n1.tiny",
        "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
        "io_optimized":"true",
        "ssh_username":"root",
        "image_name": "packer-test-regionCopy_{{timestamp}}",
        "image_copy_regions": ["cn-hangzhou", "cn-shenzhen"],
        "image_copy_names": ["packer-copy-test-hz_{{timestamp}}", "packer-copy-test-sz_{{timestamp}}"]
    }]
}
`

func checkRegionCopy(regions []string) builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        if len(artifacts) > 1 {
            return fmt.Errorf("more than 1 artifact")
        }

        // Get the actual *Artifact pointer so we can access the AMIs directly
        artifactRaw := artifacts[0]
        artifact, ok := artifactRaw.(*Artifact)
        if !ok {
            return fmt.Errorf("unknown artifact: %#v", artifactRaw)
        }

        // Verify that we copied to only the regions given
        regionSet := make(map[string]struct{})
        for _, r := range regions {
            regionSet[r] = struct{}{}
        }

        for r := range artifact.AlicloudImages {
            if r == "cn-beijing" {
                delete(regionSet, r)
                continue
            }

            if _, ok := regionSet[r]; !ok {
                return fmt.Errorf("region %s is not the target region but found in artifacts", r)
            }

            delete(regionSet, r)
        }

        if len(regionSet) > 0 {
            return fmt.Errorf("following region(s) should be the copying targets but corresponding artifact(s) not found: %#v", regionSet)
        }

        client, _ := testAliyunClient()
        for regionId, imageId := range artifact.AlicloudImages {
            describeImagesRequest := ecs.CreateDescribeImagesRequest()
            describeImagesRequest.RegionId = regionId
            describeImagesRequest.ImageId = imageId
            describeImagesRequest.Status = ImageStatusQueried
            describeImagesResponse, err := client.DescribeImages(describeImagesRequest)
            if err != nil {
                return fmt.Errorf("describe generated image %s failed due to %s", imageId, err)
            }
            if len(describeImagesResponse.Images.Image) == 0 {
                return fmt.Errorf("image %s in artifacts cannot be found", imageId)
            }

            image := describeImagesResponse.Images.Image[0]
            if image.IsCopied && regionId == "cn-hangzhou" && !strings.HasPrefix(image.ImageName, "packer-copy-test-hz") {
                return fmt.Errorf("the name of image %s in artifacts should begin with %s but got %s", imageId, "packer-copy-test-hz", image.ImageName)
            }
            if image.IsCopied && regionId == "cn-shenzhen" && !strings.HasPrefix(image.ImageName, "packer-copy-test-sz") {
                return fmt.Errorf("the name of image %s in artifacts should begin with %s but got %s", imageId, "packer-copy-test-sz", image.ImageName)
            }
        }

        return nil
    }
}

func TestBuilderAcc_forceDelete(t *testing.T) {
    t.Parallel()
    // Build the same alicloud image twice, with image_force_delete on the second run
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:              &Builder{},
        Template:             buildForceDeregisterConfig("false", "delete"),
        SkipArtifactTeardown: true,
    })

    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: buildForceDeregisterConfig("true", "delete"),
    })
}

func buildForceDeregisterConfig(val, name string) string {
    return fmt.Sprintf(testBuilderAccForceDelete, val, name)
}

const testBuilderAccForceDelete = `
{
    "builders": [{
        "type": "test",
        "region": "cn-beijing",
        "instance_type": "ecs.n1.tiny",
        "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
        "io_optimized":"true",
        "ssh_username":"root",
        "image_force_delete": "%s",
        "image_name": "packer-test-forceDelete_%s"
    }]
}
`

func TestBuilderAcc_ECSImageSharing(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccSharing,
        Check:    checkECSImageSharing("1309208528360047"),
    })
}

const testBuilderAccSharing = `
{
    "builders": [{
        "type": "test",
        "region": "cn-beijing",
        "instance_type": "ecs.n1.tiny",
        "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
        "io_optimized":"true",
        "ssh_username":"root",
        "image_name": "packer-test-ECSImageSharing_{{timestamp}}",
        "image_share_account":["1309208528360047"]
    }]
}
`

func checkECSImageSharing(uid string) builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        if len(artifacts) > 1 {
            return fmt.Errorf("more than 1 artifact")
        }

        // Get the actual *Artifact pointer so we can access the AMIs directly
        artifactRaw := artifacts[0]
        artifact, ok := artifactRaw.(*Artifact)
        if !ok {
            return fmt.Errorf("unknown artifact: %#v", artifactRaw)
        }

        // describe the image share permissions
        client, _ := testAliyunClient()

        describeImageShareRequest := ecs.CreateDescribeImageSharePermissionRequest()
        describeImageShareRequest.RegionId = "cn-beijing"
        describeImageShareRequest.ImageId = artifact.AlicloudImages["cn-beijing"]
        imageShareResponse, err := client.DescribeImageSharePermission(describeImageShareRequest)

        if err != nil {
            return fmt.Errorf("Error retrieving Image Attributes for ECS Image Artifact (%#v) "+
                "in ECS Image Sharing Test: %s", artifact, err)
        }

        if len(imageShareResponse.Accounts.Account) != 1 || imageShareResponse.Accounts.Account[0].AliyunId != uid {
            return fmt.Errorf("share account is incorrect: %d account(s) found", len(imageShareResponse.Accounts.Account))
        }

        return nil
    }
}

func TestBuilderAcc_forceDeleteSnapshot(t *testing.T) {
    t.Parallel()
    destImageName := "delete"

    // Build the same alicloud image name twice, with image_force_delete_snapshots on the second run
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:              &Builder{},
        Template:             buildForceDeleteSnapshotConfig("false", destImageName),
        SkipArtifactTeardown: true,
    })

    // Get image data by image name
    client, _ := testAliyunClient()

    describeImagesRequest := ecs.CreateDescribeImagesRequest()
    describeImagesRequest.RegionId = "cn-beijing"
    describeImagesRequest.ImageName = "packer-test-" + destImageName
    images, _ := client.DescribeImages(describeImagesRequest)

    image := images.Images.Image[0]

    // Get snapshot ids for image
    snapshotIds := []string{}
    for _, device := range image.DiskDeviceMappings.DiskDeviceMapping {
        if device.Device != "" && device.SnapshotId != "" {
            snapshotIds = append(snapshotIds, device.SnapshotId)
        }
    }

    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: buildForceDeleteSnapshotConfig("true", destImageName),
        Check:    checkSnapshotsDeleted(snapshotIds),
    })
}

func buildForceDeleteSnapshotConfig(val, name string) string {
    return fmt.Sprintf(testBuilderAccForceDeleteSnapshot, val, val, name)
}

const testBuilderAccForceDeleteSnapshot = `
{
    "builders": [{
        "type": "test",
        "region": "cn-beijing",
        "instance_type": "ecs.n1.tiny",
        "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
        "io_optimized":"true",
        "ssh_username":"root",
        "image_force_delete_snapshots": "%s",
        "image_force_delete": "%s",
        "image_name": "packer-test-%s"
    }]
}
`

func checkSnapshotsDeleted(snapshotIds []string) builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        // Verify the snapshots are gone
        client, _ := testAliyunClient()
        data, err := json.Marshal(snapshotIds)
        if err != nil {
            return fmt.Errorf("Marshal snapshotIds array failed %v", err)
        }

        describeSnapshotsRequest := ecs.CreateDescribeSnapshotsRequest()
        describeSnapshotsRequest.RegionId = "cn-beijing"
        describeSnapshotsRequest.SnapshotIds = string(data)
        snapshotResp, err := client.DescribeSnapshots(describeSnapshotsRequest)
        if err != nil {
            return fmt.Errorf("Query snapshot failed %v", err)
        }
        snapshots := snapshotResp.Snapshots.Snapshot
        if len(snapshots) > 0 {
            return fmt.Errorf("Snapshots weren't successfully deleted by " +
                "`image_force_delete_snapshots`")
        }
        return nil
    }
}

func TestBuilderAcc_imageTags(t *testing.T) {
    t.Parallel()
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() {
            testAccPreCheck(t)
        },
        Builder:  &Builder{},
        Template: testBuilderAccImageTags,
        Check:    checkImageTags(),
    })
}

const testBuilderAccImageTags = `
{ "builders": [{
    "type": "test",
    "region": "cn-beijing",
    "instance_type": "ecs.n1.tiny",
    "source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
    "ssh_username": "root",
    "io_optimized":"true",
    "image_name": "packer-test-imageTags_{{timestamp}}",
    "tags": {
        "TagKey1": "TagValue1",
        "TagKey2": "TagValue2"
    }
    }]
}`

func checkImageTags() builderT.TestCheckFunc {
    return func(artifacts []packer.Artifact) error {
        if len(artifacts) > 1 {
            return fmt.Errorf("more than 1 artifact")
        }
        // Get the actual *Artifact pointer so we can access the AMIs directly
        artifactRaw := artifacts[0]
        artifact, ok := artifactRaw.(*Artifact)
        if !ok {
            return fmt.Errorf("unknown artifact: %#v", artifactRaw)
        }
        imageId := artifact.AlicloudImages[defaultTestRegion]

        // describe the image tags
        client, _ := testAliyunClient()

        describeImageTagsRequest := ecs.CreateDescribeTagsRequest()
        describeImageTagsRequest.RegionId = defaultTestRegion
        describeImageTagsRequest.ResourceType = TagResourceImage
        describeImageTagsRequest.ResourceId = imageId
        imageTagsResponse, err := client.DescribeTags(describeImageTagsRequest)
        if err != nil {
            return fmt.Errorf("Error retrieving Image Attributes for ECS Image Artifact (%#v) "+
                "in ECS Image Tags Test: %s", artifact, err)
        }

        if len(imageTagsResponse.Tags.Tag) != 2 {
            return fmt.Errorf("expect 2 tags set on image %s but got %d", imageId, len(imageTagsResponse.Tags.Tag))
        }

        for _, tag := range imageTagsResponse.Tags.Tag {
            if tag.TagKey != "TagKey1" && tag.TagKey != "TagKey2" {
                return fmt.Errorf("tags on image %s should be within the list of TagKey1 and TagKey2 but got %s", imageId, tag.TagKey)
            }

            if tag.TagKey == "TagKey1" && tag.TagValue != "TagValue1" {
                return fmt.Errorf("the value for tag %s on image %s should be TagValue1 but got %s", tag.TagKey, imageId, tag.TagValue)
            } else if tag.TagKey == "TagKey2" && tag.TagValue != "TagValue2" {
                return fmt.Errorf("the value for tag %s on image %s should be TagValue2 but got %s", tag.TagKey, imageId, tag.TagValue)
            }
        }

        describeImagesRequest := ecs.CreateDescribeImagesRequest()
        describeImagesRequest.RegionId = defaultTestRegion
        describeImagesRequest.ImageId = imageId
        imagesResponse, err := client.DescribeImages(describeImagesRequest)
        if err != nil {
            return fmt.Errorf("describe images failed due to %s", err)
        }

        if len(imagesResponse.Images.Image) == 0 {
            return fmt.Errorf("generated image %s cannot be found", imageId)
        }

        image := imagesResponse.Images.Image[0]
        for _, mapping := range image.DiskDeviceMappings.DiskDeviceMapping {
            describeSnapshotTagsRequest := ecs.CreateDescribeTagsRequest()
            describeSnapshotTagsRequest.RegionId = defaultTestRegion
            describeSnapshotTagsRequest.ResourceType = TagResourceSnapshot
            describeSnapshotTagsRequest.ResourceId = mapping.SnapshotId
            snapshotTagsResponse, err := client.DescribeTags(describeSnapshotTagsRequest)
            if err != nil {
                return fmt.Errorf("failed to get snapshot tags due to %s", err)
            }

            if len(snapshotTagsResponse.Tags.Tag) != 2 {
                return fmt.Errorf("expect 2 tags set on snapshot %s but got %d", mapping.SnapshotId, len(snapshotTagsResponse.Tags.Tag))
            }

            for _, tag := range snapshotTagsResponse.Tags.Tag {
                if tag.TagKey != "TagKey1" && tag.TagKey != "TagKey2" {
                    return fmt.Errorf("tags on snapshot %s should be within the list of TagKey1 and TagKey2 but got %s", mapping.SnapshotId, tag.TagKey)
                }

                if tag.TagKey == "TagKey1" && tag.TagValue != "TagValue1" {
                    return fmt.Errorf("the value for tag %s on snapshot %s should be TagValue1 but got %s", tag.TagKey, mapping.SnapshotId, tag.TagValue)
                } else if tag.TagKey == "TagKey2" && tag.TagValue != "TagValue2" {
                    return fmt.Errorf("the value for tag %s on snapshot %s should be TagValue2 but got %s", tag.TagKey, mapping.SnapshotId, tag.TagValue)
                }
            }
        }

        return nil
    }
}

func TestBuilderAcc_dataDiskEncrypted(t *testing.T) {
|
||||
t.Parallel()
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccDataDiskEncrypted,
|
||||
Check: checkDataDiskEncrypted(),
|
||||
})
|
||||
}
|
||||
|
||||
const testBuilderAccDataDiskEncrypted = `
|
||||
{ "builders": [{
|
||||
"type": "test",
|
||||
"region": "cn-beijing",
|
||||
"instance_type": "ecs.n1.tiny",
|
||||
"source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
|
||||
"io_optimized":"true",
|
||||
"ssh_username":"root",
|
||||
"image_name": "packer-test-dataDiskEncrypted_{{timestamp}}",
|
||||
"image_disk_mappings": [
|
||||
{
|
||||
"disk_name": "data_disk1",
|
||||
"disk_size": 25,
|
||||
"disk_encrypted": true,
|
||||
"disk_delete_with_instance": true
|
||||
},
|
||||
{
|
||||
"disk_name": "data_disk2",
|
||||
"disk_size": 35,
|
||||
"disk_encrypted": false,
|
||||
"disk_delete_with_instance": true
|
||||
},
|
||||
{
|
||||
"disk_name": "data_disk3",
|
||||
"disk_size": 45,
|
||||
"disk_delete_with_instance": true
|
||||
}
|
||||
]
|
||||
}]
|
||||
}`
|
||||
|
||||
func checkDataDiskEncrypted() builderT.TestCheckFunc {
|
||||
return func(artifacts []packer.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
imageId := artifact.AlicloudImages[defaultTestRegion]
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
client, _ := testAliyunClient()
|
||||
|
||||
describeImagesRequest := ecs.CreateDescribeImagesRequest()
|
||||
describeImagesRequest.RegionId = defaultTestRegion
|
||||
describeImagesRequest.ImageId = imageId
|
||||
imagesResponse, err := client.DescribeImages(describeImagesRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("describe images failed due to %s", err)
|
||||
}
|
||||
|
||||
if len(imagesResponse.Images.Image) == 0 {
|
||||
return fmt.Errorf("image %s generated can not be found", imageId)
|
||||
}
|
||||
image := imagesResponse.Images.Image[0]
|
||||
|
||||
var snapshotIds []string
|
||||
for _, mapping := range image.DiskDeviceMappings.DiskDeviceMapping {
|
||||
snapshotIds = append(snapshotIds, mapping.SnapshotId)
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(snapshotIds)
|
||||
|
||||
describeSnapshotRequest := ecs.CreateDescribeSnapshotsRequest()
|
||||
describeSnapshotRequest.RegionId = defaultTestRegion
|
||||
describeSnapshotRequest.SnapshotIds = string(data)
|
||||
describeSnapshotsResponse, err := client.DescribeSnapshots(describeSnapshotRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("describe data snapshots failed due to %s", err)
|
||||
}
|
||||
if len(describeSnapshotsResponse.Snapshots.Snapshot) != 4 {
|
||||
return fmt.Errorf("expect %d data snapshots but got %d", len(snapshotIds), len(describeSnapshotsResponse.Snapshots.Snapshot))
|
||||
}
|
||||
snapshots := describeSnapshotsResponse.Snapshots.Snapshot
|
||||
for _, snapshot := range snapshots {
|
||||
if snapshot.SourceDiskType == DiskTypeSystem {
|
||||
if snapshot.Encrypted != false {
|
||||
return fmt.Errorf("the system snapshot expected to be non-encrypted but got true")
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if snapshot.SourceDiskSize == "25" && snapshot.Encrypted != true {
|
||||
return fmt.Errorf("the first snapshot expected to be encrypted but got false")
|
||||
}
|
||||
|
||||
if snapshot.SourceDiskSize == "35" && snapshot.Encrypted != false {
|
||||
return fmt.Errorf("the second snapshot expected to be non-encrypted but got true")
|
||||
}
|
||||
|
||||
if snapshot.SourceDiskSize == "45" && snapshot.Encrypted != false {
|
||||
return fmt.Errorf("the third snapshot expected to be non-encrypted but got true")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderAcc_systemDiskEncrypted(t *testing.T) {
|
||||
t.Parallel()
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccSystemDiskEncrypted,
|
||||
Check: checkSystemDiskEncrypted(),
|
||||
})
|
||||
}
|
||||
|
||||
const testBuilderAccSystemDiskEncrypted = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "cn-beijing",
|
||||
"instance_type": "ecs.n1.tiny",
|
||||
"source_image":"ubuntu_18_04_64_20G_alibase_20190509.vhd",
|
||||
"io_optimized":"true",
|
||||
"ssh_username":"root",
|
||||
"image_name": "packer-test_{{timestamp}}",
|
||||
"image_encrypted": "true"
|
||||
}]
|
||||
}`
|
||||
|
||||
func checkSystemDiskEncrypted() builderT.TestCheckFunc {
|
||||
return func(artifacts []packer.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
client, _ := testAliyunClient()
|
||||
imageId := artifact.AlicloudImages[defaultTestRegion]
|
||||
|
||||
describeImagesRequest := ecs.CreateDescribeImagesRequest()
|
||||
describeImagesRequest.RegionId = defaultTestRegion
|
||||
describeImagesRequest.ImageId = imageId
|
||||
describeImagesRequest.Status = ImageStatusQueried
|
||||
imagesResponse, err := client.DescribeImages(describeImagesRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("describe images failed due to %s", err)
|
||||
}
|
||||
|
||||
if len(imagesResponse.Images.Image) == 0 {
|
||||
return fmt.Errorf("image %s generated can not be found", imageId)
|
||||
}
|
||||
|
||||
image := imagesResponse.Images.Image[0]
|
||||
if image.IsCopied == false {
|
||||
return fmt.Errorf("image %s generated expexted to be copied but false", image.ImageId)
|
||||
}
|
||||
|
||||
describeSnapshotRequest := ecs.CreateDescribeSnapshotsRequest()
|
||||
describeSnapshotRequest.RegionId = defaultTestRegion
|
||||
describeSnapshotRequest.SnapshotIds = fmt.Sprintf("[\"%s\"]", image.DiskDeviceMappings.DiskDeviceMapping[0].SnapshotId)
|
||||
describeSnapshotsResponse, err := client.DescribeSnapshots(describeSnapshotRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("describe system snapshots failed due to %s", err)
|
||||
}
|
||||
snapshots := describeSnapshotsResponse.Snapshots.Snapshot[0]
|
||||
|
||||
if snapshots.Encrypted != true {
|
||||
return fmt.Errorf("system snapshot of image %s expected to be encrypted but got false", imageId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccPreCheck(t *testing.T) {
|
||||
if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" {
|
||||
t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests")
|
||||
}
|
||||
|
||||
if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" {
|
||||
t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests")
|
||||
}
|
||||
}
|
||||
|
||||
func testAliyunClient() (*ClientWrapper, error) {
|
||||
access := &AlicloudAccessConfig{AlicloudRegion: "cn-beijing"}
|
||||
err := access.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := access.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
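
// Illustrative sketch of the check skeleton the acceptance tests above share:
// pull the single artifact, cast it to *Artifact, build a client with
// testAliyunClient, then query ECS and compare. checkImageExistsExample is
// hypothetical and not registered by any test above.
func checkImageExistsExample() builderT.TestCheckFunc {
	return func(artifacts []packer.Artifact) error {
		if len(artifacts) > 1 {
			return fmt.Errorf("more than 1 artifact")
		}

		artifact, ok := artifacts[0].(*Artifact)
		if !ok {
			return fmt.Errorf("unknown artifact: %#v", artifacts[0])
		}

		client, err := testAliyunClient()
		if err != nil {
			return err
		}

		describeImagesRequest := ecs.CreateDescribeImagesRequest()
		describeImagesRequest.RegionId = defaultTestRegion
		describeImagesRequest.ImageId = artifact.AlicloudImages[defaultTestRegion]
		imagesResponse, err := client.DescribeImages(describeImagesRequest)
		if err != nil {
			return fmt.Errorf("describe images failed due to %s", err)
		}

		if len(imagesResponse.Images.Image) == 0 {
			return fmt.Errorf("image %s can not be found", describeImagesRequest.ImageId)
		}

		return nil
	}
}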
237  builder/alicloud/ecs/builder_test.go  Normal file
@ -0,0 +1,237 @@
package ecs

import (
	"reflect"
	"testing"

	"github.com/hashicorp/packer/packer"
	helperconfig "github.com/hashicorp/packer/packer-plugin-sdk/config"
)

func testBuilderConfig() map[string]interface{} {
	return map[string]interface{}{
		"access_key":    "foo",
		"secret_key":    "bar",
		"source_image":  "foo",
		"instance_type": "ecs.n1.tiny",
		"region":        "cn-beijing",
		"ssh_username":  "root",
		"image_name":    "foo",
		"io_optimized":  true,
	}
}

func TestBuilder_ImplementsBuilder(t *testing.T) {
	var raw interface{}
	raw = &Builder{}
	if _, ok := raw.(packer.Builder); !ok {
		t.Fatalf("Builder should be a builder")
	}
}

func TestBuilder_Prepare_BadType(t *testing.T) {
	b := &Builder{}
	c := map[string]interface{}{
		"access_key": []string{},
	}

	_, warnings, err := b.Prepare(c)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatalf("prepare should fail")
	}
}

func TestBuilderPrepare_ECSImageName(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Test good
	config["image_name"] = "ecs.n1.tiny"
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	// Test bad
	config["ecs_image_name"] = "foo {{"
	b = Builder{}
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}

	// Test bad
	delete(config, "image_name")
	b = Builder{}
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}

func TestBuilderPrepare_InvalidKey(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Add a random key
	config["i_should_not_be_valid"] = true
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}

func TestBuilderPrepare_Devices(t *testing.T) {
	var b Builder
	config := testBuilderConfig()
	config["system_disk_mapping"] = map[string]interface{}{
		"disk_category":    "cloud",
		"disk_description": "system disk",
		"disk_name":        "system_disk",
		"disk_size":        60,
	}
	config["image_disk_mappings"] = []map[string]interface{}{
		{
			"disk_category":             "cloud_efficiency",
			"disk_name":                 "data_disk1",
			"disk_size":                 100,
			"disk_snapshot_id":          "s-1",
			"disk_description":          "data disk1",
			"disk_device":               "/dev/xvdb",
			"disk_delete_with_instance": false,
		},
		{
			"disk_name":   "data_disk2",
			"disk_device": "/dev/xvdc",
		},
	}
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
	expected := AlicloudDiskDevice{
		DiskCategory: "cloud",
		Description:  "system disk",
		DiskName:     "system_disk",
		DiskSize:     60,
		Encrypted:    helperconfig.TriUnset,
	}
	if !reflect.DeepEqual(b.config.ECSSystemDiskMapping, expected) {
		t.Fatalf("system disk is not set properly, actual: %v; expected: %v", b.config.ECSSystemDiskMapping, expected)
	}
	if !reflect.DeepEqual(b.config.ECSImagesDiskMappings, []AlicloudDiskDevice{
		{
			DiskCategory:       "cloud_efficiency",
			DiskName:           "data_disk1",
			DiskSize:           100,
			SnapshotId:         "s-1",
			Description:        "data disk1",
			Device:             "/dev/xvdb",
			DeleteWithInstance: false,
		},
		{
			DiskName: "data_disk2",
			Device:   "/dev/xvdc",
		},
	}) {
		t.Fatalf("data disks are not set properly, actual: %#v", b.config.ECSImagesDiskMappings)
	}
}

func TestBuilderPrepare_IgnoreDataDisks(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	if b.config.AlicloudImageIgnoreDataDisks != false {
		t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", false, b.config.AlicloudImageIgnoreDataDisks)
	}

	config["image_ignore_data_disks"] = "false"
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	if b.config.AlicloudImageIgnoreDataDisks != false {
		t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", false, b.config.AlicloudImageIgnoreDataDisks)
	}

	config["image_ignore_data_disks"] = "true"
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	if b.config.AlicloudImageIgnoreDataDisks != true {
		t.Fatalf("image_ignore_data_disks is not set properly, expect: %t, actual: %t", true, b.config.AlicloudImageIgnoreDataDisks)
	}
}

func TestBuilderPrepare_WaitSnapshotReadyTimeout(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	if b.config.WaitSnapshotReadyTimeout != 0 {
		t.Fatalf("wait_snapshot_ready_timeout is not set properly, expect: %d, actual: %d", 0, b.config.WaitSnapshotReadyTimeout)
	}
	if b.getSnapshotReadyTimeout() != ALICLOUD_DEFAULT_LONG_TIMEOUT {
		t.Fatalf("default timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_LONG_TIMEOUT, b.getSnapshotReadyTimeout())
	}

	config["wait_snapshot_ready_timeout"] = ALICLOUD_DEFAULT_TIMEOUT
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	if b.config.WaitSnapshotReadyTimeout != ALICLOUD_DEFAULT_TIMEOUT {
		t.Fatalf("wait_snapshot_ready_timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_TIMEOUT, b.config.WaitSnapshotReadyTimeout)
	}

	if b.getSnapshotReadyTimeout() != ALICLOUD_DEFAULT_TIMEOUT {
		t.Fatalf("default timeout is not set properly, expect: %d, actual: %d", ALICLOUD_DEFAULT_TIMEOUT, b.getSnapshotReadyTimeout())
	}
}
310  builder/alicloud/ecs/client.go  Normal file
@ -0,0 +1,310 @@
package ecs

import (
	"fmt"
	"time"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
)

type ClientWrapper struct {
	*ecs.Client
}

const (
	InstanceStatusRunning  = "Running"
	InstanceStatusStarting = "Starting"
	InstanceStatusStopped  = "Stopped"
	InstanceStatusStopping = "Stopping"
)

const (
	ImageStatusWaiting      = "Waiting"
	ImageStatusCreating     = "Creating"
	ImageStatusCreateFailed = "CreateFailed"
	ImageStatusAvailable    = "Available"
)

var ImageStatusQueried = fmt.Sprintf("%s,%s,%s,%s", ImageStatusWaiting, ImageStatusCreating, ImageStatusCreateFailed, ImageStatusAvailable)

const (
	SnapshotStatusAll          = "all"
	SnapshotStatusProgressing  = "progressing"
	SnapshotStatusAccomplished = "accomplished"
	SnapshotStatusFailed       = "failed"
)

const (
	DiskStatusInUse     = "In_use"
	DiskStatusAvailable = "Available"
	DiskStatusAttaching = "Attaching"
	DiskStatusDetaching = "Detaching"
	DiskStatusCreating  = "Creating"
	DiskStatusReIniting = "ReIniting"
)

const (
	VpcStatusPending   = "Pending"
	VpcStatusAvailable = "Available"
)

const (
	VSwitchStatusPending   = "Pending"
	VSwitchStatusAvailable = "Available"
)

const (
	EipStatusAssociating   = "Associating"
	EipStatusUnassociating = "Unassociating"
	EipStatusInUse         = "InUse"
	EipStatusAvailable     = "Available"
)

const (
	ImageOwnerSystem      = "system"
	ImageOwnerSelf        = "self"
	ImageOwnerOthers      = "others"
	ImageOwnerMarketplace = "marketplace"
)

const (
	IOOptimizedNone      = "none"
	IOOptimizedOptimized = "optimized"
)

const (
	InstanceNetworkClassic = "classic"
	InstanceNetworkVpc     = "vpc"
)

const (
	DiskTypeSystem = "system"
	DiskTypeData   = "data"
)

const (
	TagResourceImage    = "image"
	TagResourceInstance = "instance"
	TagResourceSnapshot = "snapshot"
	TagResourceDisk     = "disk"
)

const (
	IpProtocolAll  = "all"
	IpProtocolTCP  = "tcp"
	IpProtocolUDP  = "udp"
	IpProtocolICMP = "icmp"
	IpProtocolGRE  = "gre"
)

const (
	NicTypeInternet = "internet"
	NicTypeIntranet = "intranet"
)

const (
	DefaultPortRange = "-1/-1"
	DefaultCidrIp    = "0.0.0.0/0"
	DefaultCidrBlock = "172.16.0.0/24"
)

const (
	defaultRetryInterval = 5 * time.Second
	defaultRetryTimes    = 12
	shortRetryTimes      = 36
	mediumRetryTimes     = 360
	longRetryTimes       = 720
)

type WaitForExpectEvalResult struct {
	evalPass  bool
	stopRetry bool
}

var (
	WaitForExpectSuccess = WaitForExpectEvalResult{
		evalPass:  true,
		stopRetry: true,
	}

	WaitForExpectToRetry = WaitForExpectEvalResult{
		evalPass:  false,
		stopRetry: false,
	}

	WaitForExpectFailToStop = WaitForExpectEvalResult{
		evalPass:  false,
		stopRetry: true,
	}
)

type WaitForExpectArgs struct {
	RequestFunc   func() (responses.AcsResponse, error)
	EvalFunc      func(response responses.AcsResponse, err error) WaitForExpectEvalResult
	RetryInterval time.Duration
	RetryTimes    int
	RetryTimeout  time.Duration
}

// WaitForExpected repeatedly calls args.RequestFunc and feeds the result to
// args.EvalFunc until the evaluation passes, asks to stop retrying, or the
// retry budget (RetryTimes, or RetryTimeout when set) is exhausted.
func (c *ClientWrapper) WaitForExpected(args *WaitForExpectArgs) (responses.AcsResponse, error) {
	if args.RetryInterval <= 0 {
		args.RetryInterval = defaultRetryInterval
	}
	if args.RetryTimes <= 0 {
		args.RetryTimes = defaultRetryTimes
	}

	var timeoutPoint time.Time
	if args.RetryTimeout > 0 {
		timeoutPoint = time.Now().Add(args.RetryTimeout)
	}

	var lastResponse responses.AcsResponse
	var lastError error

	for i := 0; ; i++ {
		if args.RetryTimeout > 0 && time.Now().After(timeoutPoint) {
			break
		}

		if args.RetryTimeout <= 0 && i >= args.RetryTimes {
			break
		}

		response, err := args.RequestFunc()
		lastResponse = response
		lastError = err

		evalResult := args.EvalFunc(response, err)
		if evalResult.evalPass {
			return response, nil
		}
		if evalResult.stopRetry {
			return response, err
		}

		time.Sleep(args.RetryInterval)
	}

	if lastError == nil {
		lastError = fmt.Errorf("<no error>")
	}

	if args.RetryTimeout > 0 {
		return lastResponse, fmt.Errorf("evaluate failed after %d seconds timeout with %d seconds retry interval: %s", int(args.RetryTimeout.Seconds()), int(args.RetryInterval.Seconds()), lastError)
	}

	return lastResponse, fmt.Errorf("evaluate failed after %d times retry with %d seconds retry interval: %s", args.RetryTimes, int(args.RetryInterval.Seconds()), lastError)
}
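
// Illustrative sketch: using WaitForExpected directly for a condition not
// covered by the helpers below, here waiting for a disk to become Available
// within a bounded timeout. waitForDiskAvailableExample is a hypothetical
// helper; DescribeDisks and its request/response types come from the same
// alibaba-cloud-sdk-go ECS client wrapped above.
func waitForDiskAvailableExample(c *ClientWrapper, regionId string, diskId string) error {
	_, err := c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDescribeDisksRequest()
			request.RegionId = regionId
			request.DiskIds = fmt.Sprintf("[\"%s\"]", diskId)
			return c.DescribeDisks(request)
		},
		EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
			if err != nil {
				// transient API failure: keep polling
				return WaitForExpectToRetry
			}
			disksResponse := response.(*ecs.DescribeDisksResponse)
			for _, disk := range disksResponse.Disks.Disk {
				if disk.Status == DiskStatusAvailable {
					return WaitForExpectSuccess
				}
			}
			return WaitForExpectToRetry
		},
		RetryTimeout: 10 * time.Minute, // hypothetical budget for this example
	})
	return err
}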

func (c *ClientWrapper) WaitForInstanceStatus(regionId string, instanceId string, expectedStatus string) (responses.AcsResponse, error) {
	return c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDescribeInstancesRequest()
			request.RegionId = regionId
			request.InstanceIds = fmt.Sprintf("[\"%s\"]", instanceId)
			return c.DescribeInstances(request)
		},
		EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
			if err != nil {
				return WaitForExpectToRetry
			}

			instancesResponse := response.(*ecs.DescribeInstancesResponse)
			instances := instancesResponse.Instances.Instance
			for _, instance := range instances {
				if instance.Status == expectedStatus {
					return WaitForExpectSuccess
				}
			}
			return WaitForExpectToRetry
		},
		RetryTimes: mediumRetryTimes,
	})
}

func (c *ClientWrapper) WaitForImageStatus(regionId string, imageId string, expectedStatus string, timeout time.Duration) (responses.AcsResponse, error) {
	return c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDescribeImagesRequest()
			request.RegionId = regionId
			request.ImageId = imageId
			request.Status = ImageStatusQueried
			return c.DescribeImages(request)
		},
		EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
			if err != nil {
				return WaitForExpectToRetry
			}

			imagesResponse := response.(*ecs.DescribeImagesResponse)
			images := imagesResponse.Images.Image
			for _, image := range images {
				if image.Status == expectedStatus {
					return WaitForExpectSuccess
				}
			}

			return WaitForExpectToRetry
		},
		RetryTimeout: timeout,
	})
}

func (c *ClientWrapper) WaitForSnapshotStatus(regionId string, snapshotId string, expectedStatus string, timeout time.Duration) (responses.AcsResponse, error) {
	return c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDescribeSnapshotsRequest()
			request.RegionId = regionId
			request.SnapshotIds = fmt.Sprintf("[\"%s\"]", snapshotId)
			return c.DescribeSnapshots(request)
		},
		EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
			if err != nil {
				return WaitForExpectToRetry
			}

			snapshotsResponse := response.(*ecs.DescribeSnapshotsResponse)
			snapshots := snapshotsResponse.Snapshots.Snapshot
			for _, snapshot := range snapshots {
				if snapshot.Status == expectedStatus {
					return WaitForExpectSuccess
				}
			}
			return WaitForExpectToRetry
		},
		RetryTimeout: timeout,
	})
}

type EvalErrorType bool

const (
	EvalRetryErrorType    = EvalErrorType(true)
	EvalNotRetryErrorType = EvalErrorType(false)
)

// EvalCouldRetryResponse builds an EvalFunc for WaitForExpected from a list of
// error codes. With EvalRetryErrorType, only the listed codes are retried;
// with EvalNotRetryErrorType, the listed codes stop the retry loop
// immediately. Errors that are not SDK errors are always retried.
func (c *ClientWrapper) EvalCouldRetryResponse(evalErrors []string, evalErrorType EvalErrorType) func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
	return func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
		if err == nil {
			return WaitForExpectSuccess
		}

		e, ok := err.(errors.Error)
		if !ok {
			return WaitForExpectToRetry
		}

		if evalErrorType == EvalRetryErrorType && !ContainsInArray(evalErrors, e.ErrorCode()) {
			return WaitForExpectFailToStop
		}

		if evalErrorType == EvalNotRetryErrorType && ContainsInArray(evalErrors, e.ErrorCode()) {
			return WaitForExpectFailToStop
		}

		return WaitForExpectToRetry
	}
}
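
// Illustrative sketch: combining WaitForExpected with EvalCouldRetryResponse
// for a mutating call. deleteSnapshotWithRetryExample and the error codes in
// retryableErrors are hypothetical, for illustration only; DeleteSnapshot is
// the standard ECS API exposed by the wrapped client.
func deleteSnapshotWithRetryExample(c *ClientWrapper, snapshotId string) error {
	retryableErrors := []string{
		// hypothetical codes: only these are retried, anything else stops
		"IncorrectSnapshotStatus",
		"InternalError",
	}

	_, err := c.WaitForExpected(&WaitForExpectArgs{
		RequestFunc: func() (responses.AcsResponse, error) {
			request := ecs.CreateDeleteSnapshotRequest()
			request.SnapshotId = snapshotId
			return c.DeleteSnapshot(request)
		},
		EvalFunc: c.EvalCouldRetryResponse(retryableErrors, EvalRetryErrorType),
	})
	return err
}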
80  builder/alicloud/ecs/client_test.go  Normal file
@ -0,0 +1,80 @@
package ecs

import (
	"fmt"
	"testing"
	"time"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)

func TestWaitForExpectedExceedRetryTimes(t *testing.T) {
	c := ClientWrapper{}

	iter := 0
	waitDone := make(chan bool, 1)

	go func() {
		_, _ = c.WaitForExpected(&WaitForExpectArgs{
			RequestFunc: func() (responses.AcsResponse, error) {
				iter++
				return nil, fmt.Errorf("test: let iteration %d fail", iter)
			},
			EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
				if err != nil {
					fmt.Printf("need retry: %s\n", err)
					return WaitForExpectToRetry
				}

				return WaitForExpectSuccess
			},
		})

		waitDone <- true
	}()

	select {
	case <-waitDone:
		if iter != defaultRetryTimes {
			t.Fatalf("WaitForExpected should terminate after %d iterations", defaultRetryTimes)
		}
	}
}

func TestWaitForExpectedExceedRetryTimeout(t *testing.T) {
	c := ClientWrapper{}

	expectTimeout := 10 * time.Second
	iter := 0
	waitDone := make(chan bool, 1)

	go func() {
		_, _ = c.WaitForExpected(&WaitForExpectArgs{
			RequestFunc: func() (responses.AcsResponse, error) {
				iter++
				return nil, fmt.Errorf("test: let iteration %d fail", iter)
			},
			EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
				if err != nil {
					fmt.Printf("need retry: %s\n", err)
					return WaitForExpectToRetry
				}

				return WaitForExpectSuccess
			},
			RetryTimeout: expectTimeout,
		})

		waitDone <- true
	}()

	timeTolerance := 1 * time.Second
	select {
	case <-waitDone:
		if iter > int(expectTimeout/defaultRetryInterval) {
			t.Fatalf("WaitForExpected should terminate within %d iterations", int(expectTimeout/defaultRetryInterval))
		}
	case <-time.After(expectTimeout + timeTolerance):
		t.Fatalf("WaitForExpected should terminate within %f seconds", (expectTimeout + timeTolerance).Seconds())
	}
}
198  builder/alicloud/ecs/image_config.go  Normal file
@ -0,0 +1,198 @@
//go:generate struct-markdown

package ecs

import (
	"fmt"
	"regexp"
	"strings"

	"github.com/hashicorp/packer/hcl2template"
	"github.com/hashicorp/packer/packer-plugin-sdk/config"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
)

// The "AlicloudDiskDevice" object is used for the `ECSSystemDiskMapping` and
// `ECSImagesDiskMappings` options, and contains the following fields:
type AlicloudDiskDevice struct {
	// The value of disk name is blank by default. [2,
	// 128] English or Chinese characters, must begin with an
	// uppercase/lowercase letter or Chinese character. Can contain numbers,
	// ., _ and -. The disk name will appear on the console. It cannot
	// begin with `http://` or `https://`.
	DiskName string `mapstructure:"disk_name" required:"false"`
	// Category of the system disk. Optional values are:
	// - cloud - general cloud disk
	// - cloud_efficiency - efficiency cloud disk
	// - cloud_ssd - cloud SSD
	DiskCategory string `mapstructure:"disk_category" required:"false"`
	// Size of the system disk, measured in GiB. Value
	// range: [20, 500]. The specified value must be equal to or greater
	// than max{20, ImageSize}. Default value: max{40, ImageSize}.
	DiskSize int `mapstructure:"disk_size" required:"false"`
	// Snapshots are used to create the data disk.
	// After this parameter is specified, Size is ignored. The actual
	// size of the created disk is the size of the specified snapshot.
	// This field is only used in the ECSImagesDiskMappings option, not
	// the ECSSystemDiskMapping option.
	SnapshotId string `mapstructure:"disk_snapshot_id" required:"false"`
	// The value of disk description is blank by
	// default. [2, 256] characters. The disk description will appear on the
	// console. It cannot begin with `http://` or `https://`.
	Description string `mapstructure:"disk_description" required:"false"`
	// Whether or not the disk is released along with the instance.
	DeleteWithInstance bool `mapstructure:"disk_delete_with_instance" required:"false"`
	// Device information of the related instance,
	// such as /dev/xvdb. It is null unless the Status is In_use.
	Device string `mapstructure:"disk_device" required:"false"`
	// Whether or not to encrypt the data disk.
	// If this option is set to true, the data disk will be encrypted and the
	// corresponding snapshot in the target image will also be encrypted. By
	// default, if this is an extra data disk, Packer will not encrypt the
	// data disk. Otherwise, Packer will keep the encryption setting to what
	// it was in the source image. Please refer to Introduction of ECS disk
	// encryption for more details.
	Encrypted config.Trilean `mapstructure:"disk_encrypted" required:"false"`
}

// The "AlicloudDiskDevices" object is used to define disk mappings for your
// instance.
type AlicloudDiskDevices struct {
	// Image disk mapping for the system disk.
	// See the [disk device configuration](#disk-devices-configuration) section
	// for more information on options.
	// Usage example:
	//
	// ```json
	// "builders": [{
	//   "type":"alicloud-ecs",
	//   "system_disk_mapping": {
	//     "disk_size": 50,
	//     "disk_name": "mydisk"
	//   },
	//   ...
	// }
	// ```
	ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false"`
	// Add one or more data disks to the image.
	// See the [disk device configuration](#disk-devices-configuration) section
	// for more information on options.
	// Usage example:
	//
	// ```json
	// "builders": [{
	//   "type":"alicloud-ecs",
	//   "image_disk_mappings": [
	//     {
	//       "disk_snapshot_id": "someid",
	//       "disk_device": "dev/xvdb"
	//     }
	//   ],
	//   ...
	// }
	// ```
	ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings" required:"false"`
}

type AlicloudImageConfig struct {
	// The name of the user-defined image, [2, 128] English or Chinese
	// characters. It must begin with an uppercase/lowercase letter or a
	// Chinese character, and may contain numbers, `_` or `-`. It cannot begin
	// with `http://` or `https://`.
	AlicloudImageName string `mapstructure:"image_name" required:"true"`
	// The version number of the image, with a length limit of 1 to 40 English
	// characters.
	AlicloudImageVersion string `mapstructure:"image_version" required:"false"`
	// The description of the image, with a length limit of 0 to 256
	// characters. Leaving it blank means null, which is the default value. It
	// cannot begin with `http://` or `https://`.
	AlicloudImageDescription string `mapstructure:"image_description" required:"false"`
	// The IDs of to-be-added Aliyun accounts to which the image is shared. The
	// number of accounts is 1 to 10. If number of accounts is greater than 10,
	// this parameter is ignored.
	AlicloudImageShareAccounts   []string `mapstructure:"image_share_account" required:"false"`
	AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
	// Copy the image to the given destination region IDs.
	AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false"`
	// The name of the destination image, [2, 128] English or Chinese
	// characters. It must begin with an uppercase/lowercase letter or a
	// Chinese character, and may contain numbers, _ or -. It cannot begin with
	// `http://` or `https://`.
	AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false"`
	// Whether or not to encrypt the target images, including those
	// copied if image_copy_regions is specified. If this option is set to
	// true, a temporary image will be created from the provisioned instance in
	// the main region and an encrypted copy will be generated in the same
	// region. By default, Packer will keep the encryption setting to what it
	// was in the source image.
	ImageEncrypted config.Trilean `mapstructure:"image_encrypted" required:"false"`
	// If this value is true, when the target image names, including those
	// copied, are duplicated with existing images, it will delete the existing
	// images and then create the target images; otherwise, the creation will
	// fail. The default value is false. Check `image_name` and
	// `image_copy_names` options for names of target images. If
	// [-force](/docs/commands/build#force) option is provided in `build`
	// command, this option can be omitted and taken as true.
	AlicloudImageForceDelete bool `mapstructure:"image_force_delete" required:"false"`
	// If this value is true, when the duplicated existing images are deleted,
	// the source snapshots of those images will be deleted as well. If
	// [-force](/docs/commands/build#force) option is provided in `build`
	// command, this option can be omitted and taken as true.
	AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots" required:"false"`
	AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
	// If this value is true, the image created will not include any snapshot
	// of data disks. This option is useful whenever the default data disks
	// that come with certain instance types are not wanted. The default
	// value is false.
	AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks" required:"false"`
	// The region validation can be skipped if this value is true, the default
	// value is false.
	AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
	// Key/value pair tags applied to the destination image and relevant
	// snapshots.
	AlicloudImageTags map[string]string `mapstructure:"tags" required:"false"`
	// Same as [`tags`](#tags) but defined as a singular repeatable block
	// containing a `key` and a `value` field. In HCL2 mode the
	// [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
	// will allow you to create those programmatically.
	AlicloudImageTag    hcl2template.KeyValues `mapstructure:"tag" required:"false"`
	AlicloudDiskDevices `mapstructure:",squash"`
}

func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
	var errs []error
	errs = append(errs, c.AlicloudImageTag.CopyOn(&c.AlicloudImageTags)...)
	if c.AlicloudImageName == "" {
		errs = append(errs, fmt.Errorf("image_name must be specified"))
	} else if len(c.AlicloudImageName) < 2 || len(c.AlicloudImageName) > 128 {
		errs = append(errs, fmt.Errorf("image_name must be between 2 and 128 characters"))
	} else if strings.HasPrefix(c.AlicloudImageName, "http://") ||
		strings.HasPrefix(c.AlicloudImageName, "https://") {
		errs = append(errs, fmt.Errorf("image_name can't start with 'http://' or 'https://'"))
	}
	reg := regexp.MustCompile(`\s+`)
	if reg.FindString(c.AlicloudImageName) != "" {
		errs = append(errs, fmt.Errorf("image_name can't include spaces"))
	}

	if len(c.AlicloudImageDestinationRegions) > 0 {
		regionSet := make(map[string]struct{})
		regions := make([]string, 0, len(c.AlicloudImageDestinationRegions))

		for _, region := range c.AlicloudImageDestinationRegions {
			// If we already saw the region, then don't look again
			if _, ok := regionSet[region]; ok {
				continue
			}

			// Mark that we saw the region
			regionSet[region] = struct{}{}
			regions = append(regions, region)
		}

		c.AlicloudImageDestinationRegions = regions
	}

	return errs
}
61  builder/alicloud/ecs/image_config_test.go  Normal file
@ -0,0 +1,61 @@
package ecs

import (
	"testing"
)

func testAlicloudImageConfig() *AlicloudImageConfig {
	return &AlicloudImageConfig{
		AlicloudImageName: "foo",
	}
}

func TestECSImageConfigPrepare_name(t *testing.T) {
	c := testAlicloudImageConfig()
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudImageName = ""
	if err := c.Prepare(nil); err == nil {
		t.Fatal("should have error")
	}
}

func TestAMIConfigPrepare_regions(t *testing.T) {
	c := testAlicloudImageConfig()
	c.AlicloudImageDestinationRegions = nil
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudImageDestinationRegions = []string{"cn-beijing", "cn-hangzhou", "eu-central-1"}
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("bad: %s", err)
	}

	c.AlicloudImageDestinationRegions = nil
	c.AlicloudImageSkipRegionValidation = true
	if err := c.Prepare(nil); err != nil {
		t.Fatal("shouldn't have error")
	}
	c.AlicloudImageSkipRegionValidation = false
}

func TestECSImageConfigPrepare_imageTags(t *testing.T) {
	c := testAlicloudImageConfig()
	c.AlicloudImageTags = map[string]string{
		"TagKey1": "TagValue1",
		"TagKey2": "TagValue2",
	}
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if len(c.AlicloudImageTags) != 2 || c.AlicloudImageTags["TagKey1"] != "TagValue1" ||
		c.AlicloudImageTags["TagKey2"] != "TagValue2" {
		t.Fatalf("invalid value, expected: %s, actual: %s", map[string]string{
			"TagKey1": "TagValue1",
			"TagKey2": "TagValue2",
		}, c.AlicloudImageTags)
	}
}
52  builder/alicloud/ecs/packer_helper.go  Normal file
@ -0,0 +1,52 @@
package ecs

import (
	"fmt"
	"strconv"

	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// cleanUpMessage tells the user why a resource is being cleaned up,
// distinguishing cancellation/error from a normal end-of-build cleanup.
func cleanUpMessage(state multistep.StateBag, module string) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)

	ui := state.Get("ui").(packer.Ui)

	if cancelled || halted {
		ui.Say(fmt.Sprintf("Deleting %s because of cancellation or error...", module))
	} else {
		ui.Say(fmt.Sprintf("Cleaning up '%s'", module))
	}
}

// halt stores the (optionally prefixed) error in the state bag, reports it to
// the UI and aborts the step sequence.
func halt(state multistep.StateBag, err error, prefix string) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)

	if prefix != "" {
		err = fmt.Errorf("%s: %s", prefix, err)
	}

	state.Put("error", err)
	ui.Error(err.Error())
	return multistep.ActionHalt
}

// convertNumber renders a positive int as a string; non-positive values
// become the empty string.
func convertNumber(value int) string {
	if value <= 0 {
		return ""
	}

	return strconv.Itoa(value)
}

// ContainsInArray reports whether value is present in arr.
func ContainsInArray(arr []string, value string) bool {
	for _, item := range arr {
		if item == value {
			return true
		}
	}

	return false
}
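
// Illustrative sketch of how halt and cleanUpMessage above are meant to be used
// from a multistep step. stepExample and createExampleResource are hypothetical
// names; the context and multistep imports match the ones used by the real
// steps in this package.
type stepExample struct{}

func (s *stepExample) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	if err := createExampleResource(); err != nil {
		// halt records the error in the state bag, reports it, and aborts the build
		return halt(state, err, "Error creating example resource")
	}
	return multistep.ActionContinue
}

func (s *stepExample) Cleanup(state multistep.StateBag) {
	// cleanUpMessage distinguishes cleanup after cancellation/error from a
	// normal end-of-build cleanup
	cleanUpMessage(state, "example resource")
	// ... release the resource here ...
}

func createExampleResource() error { return nil } // hypothetical placeholder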
154  builder/alicloud/ecs/run_config.go  Normal file
@ -0,0 +1,154 @@
//go:generate struct-markdown

package ecs

import (
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/hashicorp/packer/helper/communicator"
	"github.com/hashicorp/packer/packer-plugin-sdk/config"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
	"github.com/hashicorp/packer/packer-plugin-sdk/uuid"
)

type RunConfig struct {
	AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
	// ID of the zone to which the disk belongs.
	ZoneId string `mapstructure:"zone_id" required:"false"`
	// Whether an ECS instance is I/O optimized or not. If this option is not
	// provided, the value will be determined by product API according to what
	// `instance_type` is used.
	IOOptimized config.Trilean `mapstructure:"io_optimized" required:"false"`
	// Type of the instance. For values, see [Instance Type
	// Table](https://www.alibabacloud.com/help/doc-detail/25378.htm?spm=a3c0i.o25499en.a3.9.14a36ac8iYqKRA).
	// You can also obtain the latest instance type table by invoking the
	// [Querying Instance Type
	// Table](https://intl.aliyun.com/help/doc-detail/25620.htm?spm=a3c0i.o25499en.a3.6.Dr1bik)
	// interface.
	InstanceType string `mapstructure:"instance_type" required:"true"`
	Description  string `mapstructure:"description"`
	// This is the base image id from which you want to
	// create your customized image.
	AlicloudSourceImage string `mapstructure:"source_image" required:"true"`
	// Whether to force shutdown upon device
	// restart. The default value is `false`.
	//
	// If it is set to `false`, the system is shut down normally; if it is set to
	// `true`, the system is forced to shut down.
	ForceStopInstance bool `mapstructure:"force_stop_instance" required:"false"`
	// If this option is set to true, Packer
	// will not stop the instance for you, and you need to make sure the instance
	// will be stopped in the final provisioner command. Otherwise, Packer will
	// time out while waiting for the instance to be stopped. This option is
	// provided for some specific scenarios in which you want to stop the
	// instance by yourself, e.g., Sysprep on Windows, which may shut down the
	// instance within its command. The default value is false.
	DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
	// ID of the security group to which a newly
	// created instance belongs. Mutual access is allowed between instances in one
	// security group. If not specified, the newly created instance will be added
	// to the default security group. If the default group doesn’t exist, or the
	// number of instances in it has reached the maximum limit, a new security
	// group will be created automatically.
	SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
	// The security group name. The default value
	// is blank. [2, 128] English or Chinese characters, must begin with an
	// uppercase/lowercase letter or Chinese character. Can contain numbers, .,
	// _ or -. It cannot begin with `http://` or `https://`.
	SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
	// User data to apply when launching the instance. Note
	// that you need to be careful about escaping characters due to the templates
	// being JSON. It is often more convenient to use user_data_file, instead.
	// Packer will not automatically wait for a user script to finish before
	// shutting down the instance; this must be handled in a provisioner.
	UserData string `mapstructure:"user_data" required:"false"`
	// Path to a file that will be used for the user
	// data when launching the instance.
	UserDataFile string `mapstructure:"user_data_file" required:"false"`
	// VPC ID allocated by the system.
	VpcId string `mapstructure:"vpc_id" required:"false"`
	// The VPC name. The default value is blank. [2, 128]
	// English or Chinese characters, must begin with an uppercase/lowercase
	// letter or Chinese character. Can contain numbers, _ and -. The disk
	// description will appear on the console. Cannot begin with `http://` or
	// `https://`.
	VpcName string `mapstructure:"vpc_name" required:"false"`
	// Value options: 192.168.0.0/16 and
	// 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
	CidrBlock string `mapstructure:"vpc_cidr_block" required:"false"`
	// The ID of the VSwitch to be used.
	VSwitchId string `mapstructure:"vswitch_id" required:"false"`
	// The name of the VSwitch to be used.
	VSwitchName string `mapstructure:"vswitch_name" required:"false"`
	// Display name of the instance, which is a string of 2 to 128 Chinese or
	// English characters. It must begin with an uppercase/lowercase letter or
	// a Chinese character and can contain numerals, `.`, `_`, or `-`. The
	// instance name is displayed on the Alibaba Cloud console. If this
	// parameter is not specified, the default value is InstanceId of the
	// instance. It cannot begin with `http://` or `https://`.
	InstanceName string `mapstructure:"instance_name" required:"false"`
	// Internet charge type, which can be
	// `PayByTraffic` or `PayByBandwidth`. Optional values:
	// - `PayByBandwidth`
	// - `PayByTraffic`
	//
	// If this parameter is not specified, the default value is `PayByBandwidth`.
	// For regions outside China, currently only `PayByTraffic` is supported,
	// and you must set it manually.
	InternetChargeType string `mapstructure:"internet_charge_type" required:"false"`
	// Maximum outgoing bandwidth to the
	// public network, measured in Mbps (megabits per second).
	//
	// Value range:
	// - `PayByBandwidth`: \[0, 100\]. If this parameter is not specified, API
	//   automatically sets it to 0 Mbps.
	// - `PayByTraffic`: \[1, 100\]. If this parameter is not specified, an
	//   error is returned.
	InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out" required:"false"`
	// Timeout of creating snapshot(s).
	// The default timeout is 3600 seconds if this option is not set or is set
	// to 0. For those disks containing lots of data, it may require a higher
	// timeout value.
	WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout" required:"false"`
	// Communicator settings
	Comm communicator.Config `mapstructure:",squash"`
	// If this value is true, Packer will connect to
	// the ECS instance created through its private IP instead of allocating a
	// public IP or an EIP. The default value is false.
	SSHPrivateIp bool `mapstructure:"ssh_private_ip" required:"false"`
}

func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
	if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
		c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" && c.Comm.WinRMPassword == "" {

		c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
	}

	// Validation
	errs := c.Comm.Prepare(ctx)
	if c.AlicloudSourceImage == "" {
		errs = append(errs, errors.New("A source_image must be specified"))
	}

	if strings.TrimSpace(c.AlicloudSourceImage) != c.AlicloudSourceImage {
		errs = append(errs, errors.New("The source_image can't include spaces"))
	}

	if c.InstanceType == "" {
		errs = append(errs, errors.New("An alicloud_instance_type must be specified"))
	}

	if c.UserData != "" && c.UserDataFile != "" {
		errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
	} else if c.UserDataFile != "" {
		if _, err := os.Stat(c.UserDataFile); err != nil {
			errs = append(errs, fmt.Errorf("user_data_file not found: %s", c.UserDataFile))
		}
	}

	return errs
}
178  builder/alicloud/ecs/run_config_test.go  Normal file
@ -0,0 +1,178 @@
package ecs

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/hashicorp/packer/helper/communicator"
)

func testConfig() *RunConfig {
	return &RunConfig{
		AlicloudSourceImage: "alicloud_images",
		InstanceType:        "ecs.n1.tiny",
		Comm: communicator.Config{
			SSH: communicator.SSH{
				SSHUsername: "alicloud",
			},
		},
	}
}

func TestRunConfigPrepare(t *testing.T) {
	c := testConfig()
	err := c.Prepare(nil)
	if len(err) > 0 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_InstanceType(t *testing.T) {
	c := testConfig()
	c.InstanceType = ""
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_SourceECSImage(t *testing.T) {
	c := testConfig()
	c.AlicloudSourceImage = ""
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_SSHPort(t *testing.T) {
	c := testConfig()
	c.Comm.SSHPort = 0
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHPort != 22 {
		t.Fatalf("invalid value: %d", c.Comm.SSHPort)
	}

	c.Comm.SSHPort = 44
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHPort != 44 {
		t.Fatalf("invalid value: %d", c.Comm.SSHPort)
	}
}

func TestRunConfigPrepare_UserData(t *testing.T) {
	c := testConfig()
	tf, err := ioutil.TempFile("", "packer")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Remove(tf.Name())
	defer tf.Close()

	c.UserData = "foo"
	c.UserDataFile = tf.Name()
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_UserDataFile(t *testing.T) {
	c := testConfig()
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	c.UserDataFile = "idontexistidontthink"
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}

	tf, err := ioutil.TempFile("", "packer")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Remove(tf.Name())
	defer tf.Close()

	c.UserDataFile = tf.Name()
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
	c := testConfig()
	c.Comm.SSHTemporaryKeyPairName = ""
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHTemporaryKeyPairName == "" {
		t.Fatal("keypair name is empty")
	}

	c.Comm.SSHTemporaryKeyPairName = "ssh-key-123"
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHTemporaryKeyPairName != "ssh-key-123" {
		t.Fatal("keypair name does not match")
	}
}

func TestRunConfigPrepare_SSHPrivateIp(t *testing.T) {
	c := testConfig()
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.SSHPrivateIp != false {
		t.Fatalf("invalid value, expected: %t, actual: %t", false, c.SSHPrivateIp)
	}
	c.SSHPrivateIp = true
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.SSHPrivateIp != true {
		t.Fatalf("invalid value, expected: %t, actual: %t", true, c.SSHPrivateIp)
	}
	c.SSHPrivateIp = false
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.SSHPrivateIp != false {
		t.Fatalf("invalid value, expected: %t, actual: %t", false, c.SSHPrivateIp)
	}
}

func TestRunConfigPrepare_DisableStopInstance(t *testing.T) {
	c := testConfig()

	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.DisableStopInstance != false {
		t.Fatalf("invalid value, expected: %t, actual: %t", false, c.DisableStopInstance)
	}

	c.DisableStopInstance = true
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.DisableStopInstance != true {
		t.Fatalf("invalid value, expected: %t, actual: %t", true, c.DisableStopInstance)
	}

	c.DisableStopInstance = false
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
	if c.DisableStopInstance != false {
		t.Fatalf("invalid value, expected: %t, actual: %t", false, c.DisableStopInstance)
	}
}
23  builder/alicloud/ecs/ssh_helper.go  Normal file
@ -0,0 +1,23 @@
package ecs

import (
	"time"

	"github.com/hashicorp/packer/helper/multistep"
)

var (
	// modified in tests
	sshHostSleepDuration = time.Second
)

type alicloudSSHHelper interface {
}

// SSHHost returns a function that can be given to the SSH communicator
func SSHHost(e alicloudSSHHelper, private bool) func(multistep.StateBag) (string, error) {
	return func(state multistep.StateBag) (string, error) {
		ipAddress := state.Get("ipaddress").(string)
		return ipAddress, nil
	}
}
|
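A host callback like this is typically handed to the shared connect step. A sketch of the assumed wiring in the builder's step list (field names follow the pattern of other Packer builders and are not confirmed by this diff):

    // Illustrative wiring; assumes the standard communicator.StepConnect shape.
    &communicator.StepConnect{
        Config:    &b.config.RunConfig.Comm,
        Host:      SSHHost(client, b.config.SSHPrivateIp),
        SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
    },
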
77  builder/alicloud/ecs/step_attach_keypair.go  Normal file
@ -0,0 +1,77 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepAttachKeyPair struct {
}

var attachKeyPairNotRetryErrors = []string{
    "MissingParameter",
    "DependencyViolation.WindowsInstance",
    "InvalidKeyPairName.NotFound",
    "InvalidRegionId.NotFound",
}

func (s *stepAttachKeyPair) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packer.Ui)
    client := state.Get("client").(*ClientWrapper)
    config := state.Get("config").(*Config)
    instance := state.Get("instance").(*ecs.Instance)
    keyPairName := config.Comm.SSHKeyPairName
    if keyPairName == "" {
        return multistep.ActionContinue
    }

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateAttachKeyPairRequest()
            request.RegionId = config.AlicloudRegion
            request.KeyPairName = keyPairName
            request.InstanceIds = "[\"" + instance.InstanceId + "\"]"
            return client.AttachKeyPair(request)
        },
        EvalFunc: client.EvalCouldRetryResponse(attachKeyPairNotRetryErrors, EvalNotRetryErrorType),
    })

    if err != nil {
        return halt(state, err, fmt.Sprintf("Error attaching keypair %s to instance %s", keyPairName, instance.InstanceId))
    }

    ui.Message(fmt.Sprintf("Attach keypair %s to instance: %s", keyPairName, instance.InstanceId))
    return multistep.ActionContinue
}

func (s *stepAttachKeyPair) Cleanup(state multistep.StateBag) {
    client := state.Get("client").(*ClientWrapper)
    config := state.Get("config").(*Config)
    ui := state.Get("ui").(packer.Ui)
    instance := state.Get("instance").(*ecs.Instance)
    keyPairName := config.Comm.SSHKeyPairName
    if keyPairName == "" {
        return
    }

    detachKeyPairRequest := ecs.CreateDetachKeyPairRequest()
    detachKeyPairRequest.RegionId = config.AlicloudRegion
    detachKeyPairRequest.KeyPairName = keyPairName
    detachKeyPairRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instance.InstanceId)
    _, err := client.DetachKeyPair(detachKeyPairRequest)
    if err != nil {
        err := fmt.Errorf("Error detaching keypair %s from instance %s: %s", keyPairName,
            instance.InstanceId, err)
        state.Put("error", err)
        ui.Error(err.Error())
        return
    }

    ui.Message(fmt.Sprintf("Detach keypair %s from instance: %s", keyPairName, instance.InstanceId))
}
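This is the first of many steps below that funnel SDK calls through ClientWrapper.WaitForExpected: RequestFunc issues the call and EvalFunc decides whether to retry. From the usage in this diff, EvalCouldRetryResponse builds an EvalFunc from a list of error codes; with EvalNotRetryErrorType the listed codes abort immediately and everything else is retried, while EvalRetryErrorType (used by later steps) retries only the listed codes. A compacted usage sketch, for reference:

    // Illustrative only — the retry pattern reused throughout these steps.
    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.AttachKeyPair(request) // any SDK call
        },
        // Abort on the listed codes; retry other failures.
        EvalFunc: client.EvalCouldRetryResponse(attachKeyPairNotRetryErrors, EvalNotRetryErrorType),
    })
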
57  builder/alicloud/ecs/step_check_source_image.go  Normal file
@ -0,0 +1,57 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepCheckAlicloudSourceImage struct {
    SourceECSImageId string
}

func (s *stepCheckAlicloudSourceImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    config := state.Get("config").(*Config)
    ui := state.Get("ui").(packer.Ui)

    describeImagesRequest := ecs.CreateDescribeImagesRequest()
    describeImagesRequest.RegionId = config.AlicloudRegion
    describeImagesRequest.ImageId = config.AlicloudSourceImage
    if config.AlicloudSkipImageValidation {
        describeImagesRequest.ShowExpired = "true"
    }
    imagesResponse, err := client.DescribeImages(describeImagesRequest)
    if err != nil {
        return halt(state, err, "Error querying alicloud image")
    }

    images := imagesResponse.Images.Image

    // Describe marketplace image
    describeImagesRequest.ImageOwnerAlias = "marketplace"
    marketImagesResponse, err := client.DescribeImages(describeImagesRequest)
    if err != nil {
        return halt(state, err, "Error querying alicloud marketplace image")
    }

    marketImages := marketImagesResponse.Images.Image
    if len(marketImages) > 0 {
        images = append(images, marketImages...)
    }

    if len(images) == 0 {
        err := fmt.Errorf("No alicloud image was found matching filters: %v", config.AlicloudSourceImage)
        return halt(state, err, "")
    }

    ui.Message(fmt.Sprintf("Found image ID: %s", images[0].ImageId))

    state.Put("source_image", &images[0])
    return multistep.ActionContinue
}

func (s *stepCheckAlicloudSourceImage) Cleanup(multistep.StateBag) {}
165  builder/alicloud/ecs/step_config_eip.go  Normal file
@ -0,0 +1,165 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudEIP struct {
    AssociatePublicIpAddress bool
    RegionId                 string
    InternetChargeType       string
    InternetMaxBandwidthOut  int
    allocatedId              string
    SSHPrivateIp             bool
}

var allocateEipAddressRetryErrors = []string{
    "LastTokenProcessing",
}

func (s *stepConfigAlicloudEIP) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    instance := state.Get("instance").(*ecs.Instance)

    if s.SSHPrivateIp {
        ipaddress := instance.VpcAttributes.PrivateIpAddress.IpAddress
        if len(ipaddress) == 0 {
            ui.Say("Failed to get private ip of instance")
            return multistep.ActionHalt
        }
        state.Put("ipaddress", ipaddress[0])
        return multistep.ActionContinue
    }

    ui.Say("Allocating eip...")

    allocateEipAddressRequest := s.buildAllocateEipAddressRequest(state)
    allocateEipAddressResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.AllocateEipAddress(allocateEipAddressRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(allocateEipAddressRetryErrors, EvalRetryErrorType),
    })

    if err != nil {
        return halt(state, err, "Error allocating eip")
    }

    ipaddress := allocateEipAddressResponse.(*ecs.AllocateEipAddressResponse).EipAddress
    ui.Message(fmt.Sprintf("Allocated eip: %s", ipaddress))

    allocateId := allocateEipAddressResponse.(*ecs.AllocateEipAddressResponse).AllocationId
    s.allocatedId = allocateId

    err = s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusAvailable)
    if err != nil {
        return halt(state, err, "Timeout waiting for eip to be available")
    }

    associateEipAddressRequest := ecs.CreateAssociateEipAddressRequest()
    associateEipAddressRequest.AllocationId = allocateId
    associateEipAddressRequest.InstanceId = instance.InstanceId
    if _, err := client.AssociateEipAddress(associateEipAddressRequest); err != nil {
        e, ok := err.(errors.Error)
        if !ok || e.ErrorCode() != "TaskConflict" {
            return halt(state, err, "Error associating eip")
        }

        ui.Error(fmt.Sprintf("Error associate eip: %s", err))
    }

    err = s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusInUse)
    if err != nil {
        return halt(state, err, "Timeout waiting for eip to be associated")
    }

    state.Put("ipaddress", ipaddress)
    return multistep.ActionContinue
}

func (s *stepConfigAlicloudEIP) Cleanup(state multistep.StateBag) {
    if len(s.allocatedId) == 0 {
        return
    }

    cleanUpMessage(state, "EIP")

    client := state.Get("client").(*ClientWrapper)
    instance := state.Get("instance").(*ecs.Instance)
    ui := state.Get("ui").(packer.Ui)

    unassociateEipAddressRequest := ecs.CreateUnassociateEipAddressRequest()
    unassociateEipAddressRequest.AllocationId = s.allocatedId
    unassociateEipAddressRequest.InstanceId = instance.InstanceId
    if _, err := client.UnassociateEipAddress(unassociateEipAddressRequest); err != nil {
        ui.Say(fmt.Sprintf("Failed to unassociate eip: %s", err))
    }

    if err := s.waitForEipStatus(client, instance.RegionId, s.allocatedId, EipStatusAvailable); err != nil {
        ui.Say(fmt.Sprintf("Timeout while unassociating eip: %s", err))
    }

    releaseEipAddressRequest := ecs.CreateReleaseEipAddressRequest()
    releaseEipAddressRequest.AllocationId = s.allocatedId
    if _, err := client.ReleaseEipAddress(releaseEipAddressRequest); err != nil {
        ui.Say(fmt.Sprintf("Failed to release eip: %s", err))
    }
}

func (s *stepConfigAlicloudEIP) waitForEipStatus(client *ClientWrapper, regionId string, allocationId string, expectedStatus string) error {
    describeEipAddressesRequest := ecs.CreateDescribeEipAddressesRequest()
    describeEipAddressesRequest.RegionId = regionId
    describeEipAddressesRequest.AllocationId = s.allocatedId

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            response, err := client.DescribeEipAddresses(describeEipAddressesRequest)
            if err == nil && len(response.EipAddresses.EipAddress) == 0 {
                err = fmt.Errorf("allocated eip was not found")
            }

            return response, err
        },
        EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
            if err != nil {
                return WaitForExpectToRetry
            }

            eipAddressesResponse := response.(*ecs.DescribeEipAddressesResponse)
            eipAddresses := eipAddressesResponse.EipAddresses.EipAddress

            for _, eipAddress := range eipAddresses {
                if eipAddress.Status == expectedStatus {
                    return WaitForExpectSuccess
                }
            }

            return WaitForExpectToRetry
        },
        RetryTimes: shortRetryTimes,
    })

    return err
}

func (s *stepConfigAlicloudEIP) buildAllocateEipAddressRequest(state multistep.StateBag) *ecs.AllocateEipAddressRequest {
    instance := state.Get("instance").(*ecs.Instance)

    request := ecs.CreateAllocateEipAddressRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.RegionId = instance.RegionId
    request.InternetChargeType = s.InternetChargeType
    request.Bandwidth = string(convertNumber(s.InternetMaxBandwidthOut))

    return request
}
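The polling above compares against EipStatusAvailable and EipStatusInUse, which are defined elsewhere in the package, presumably something like the following (the values match Alicloud's documented EIP states but are an assumption here):

    // Assumed package-level constants (not part of this diff):
    const (
        EipStatusAvailable = "Available" // allocated but not yet bound
        EipStatusInUse     = "InUse"     // bound to an instance
    )
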
132  builder/alicloud/ecs/step_config_key_pair.go  Normal file
@ -0,0 +1,132 @@
package ecs

import (
    "context"
    "fmt"
    "os"
    "runtime"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/communicator"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudKeyPair struct {
    Debug        bool
    Comm         *communicator.Config
    DebugKeyPath string
    RegionId     string

    keyName string
}

func (s *stepConfigAlicloudKeyPair) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packer.Ui)

    if s.Comm.SSHPrivateKeyFile != "" {
        ui.Say("Using existing SSH private key")
        privateKeyBytes, err := s.Comm.ReadSSHPrivateKeyFile()
        if err != nil {
            state.Put("error", err)
            return multistep.ActionHalt
        }

        s.Comm.SSHPrivateKey = privateKeyBytes
        return multistep.ActionContinue
    }

    if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName == "" {
        ui.Say("Using SSH Agent with key pair in source image")
        return multistep.ActionContinue
    }

    if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName != "" {
        ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.Comm.SSHKeyPairName))
        return multistep.ActionContinue
    }

    if s.Comm.SSHTemporaryKeyPairName == "" {
        ui.Say("Not using temporary keypair")
        s.Comm.SSHKeyPairName = ""
        return multistep.ActionContinue
    }

    client := state.Get("client").(*ClientWrapper)
    ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.Comm.SSHTemporaryKeyPairName))

    createKeyPairRequest := ecs.CreateCreateKeyPairRequest()
    createKeyPairRequest.RegionId = s.RegionId
    createKeyPairRequest.KeyPairName = s.Comm.SSHTemporaryKeyPairName
    keyResp, err := client.CreateKeyPair(createKeyPairRequest)
    if err != nil {
        return halt(state, err, "Error creating temporary keypair")
    }

    // Set the keyname so we know to delete it later
    s.keyName = s.Comm.SSHTemporaryKeyPairName

    // Set some state data for use in future steps
    s.Comm.SSHKeyPairName = s.keyName
    s.Comm.SSHPrivateKey = []byte(keyResp.PrivateKeyBody)

    // If we're in debug mode, output the private key to the working
    // directory.
    if s.Debug {
        ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
        f, err := os.Create(s.DebugKeyPath)
        if err != nil {
            state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
            return multistep.ActionHalt
        }
        defer f.Close()

        // Write the key out
        if _, err := f.Write([]byte(keyResp.PrivateKeyBody)); err != nil {
            state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
            return multistep.ActionHalt
        }

        // Chmod it so that it is SSH ready
        if runtime.GOOS != "windows" {
            if err := f.Chmod(0600); err != nil {
                state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err))
                return multistep.ActionHalt
            }
        }
    }

    return multistep.ActionContinue
}

func (s *stepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) {
    // If no key name is set, then we never created it, so just return
    // If we used an SSH private key file, do not go about deleting
    // keypairs
    if s.Comm.SSHPrivateKeyFile != "" || (s.Comm.SSHKeyPairName == "" && s.keyName == "") {
        return
    }

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    // Remove the keypair
    ui.Say("Deleting temporary keypair...")

    deleteKeyPairsRequest := ecs.CreateDeleteKeyPairsRequest()
    deleteKeyPairsRequest.RegionId = s.RegionId
    deleteKeyPairsRequest.KeyPairNames = fmt.Sprintf("[\"%s\"]", s.keyName)
    _, err := client.DeleteKeyPairs(deleteKeyPairsRequest)
    if err != nil {
        ui.Error(fmt.Sprintf(
            "Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
    }

    // Also remove the physical key if we're debugging.
    if s.Debug {
        if err := os.Remove(s.DebugKeyPath); err != nil {
            ui.Error(fmt.Sprintf(
                "Error removing debug key '%s': %s", s.DebugKeyPath, err))
        }
    }
}
48  builder/alicloud/ecs/step_config_public_ip.go  Normal file
@ -0,0 +1,48 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudPublicIP struct {
    publicIPAddress string
    RegionId        string
    SSHPrivateIp    bool
}

func (s *stepConfigAlicloudPublicIP) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    instance := state.Get("instance").(*ecs.Instance)

    if s.SSHPrivateIp {
        ipaddress := instance.InnerIpAddress.IpAddress
        if len(ipaddress) == 0 {
            ui.Say("Failed to get private ip of instance")
            return multistep.ActionHalt
        }
        state.Put("ipaddress", ipaddress[0])
        return multistep.ActionContinue
    }

    allocatePublicIpAddressRequest := ecs.CreateAllocatePublicIpAddressRequest()
    allocatePublicIpAddressRequest.InstanceId = instance.InstanceId
    ipaddress, err := client.AllocatePublicIpAddress(allocatePublicIpAddressRequest)
    if err != nil {
        return halt(state, err, "Error allocating public ip")
    }

    s.publicIPAddress = ipaddress.IpAddress
    ui.Say(fmt.Sprintf("Allocated public ip address %s.", ipaddress.IpAddress))
    state.Put("ipaddress", ipaddress.IpAddress)
    return multistep.ActionContinue
}

func (s *stepConfigAlicloudPublicIP) Cleanup(state multistep.StateBag) {
}
152  builder/alicloud/ecs/step_config_security_group.go  Normal file
@ -0,0 +1,152 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"
)

type stepConfigAlicloudSecurityGroup struct {
    SecurityGroupId   string
    SecurityGroupName string
    Description       string
    VpcId             string
    RegionId          string
    isCreate          bool
}

var createSecurityGroupRetryErrors = []string{
    "IdempotentProcessing",
}

var deleteSecurityGroupRetryErrors = []string{
    "DependencyViolation",
}

func (s *stepConfigAlicloudSecurityGroup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    networkType := state.Get("networktype").(InstanceNetWork)

    if len(s.SecurityGroupId) != 0 {
        describeSecurityGroupsRequest := ecs.CreateDescribeSecurityGroupsRequest()
        describeSecurityGroupsRequest.RegionId = s.RegionId
        describeSecurityGroupsRequest.SecurityGroupId = s.SecurityGroupId
        if networkType == InstanceNetworkVpc {
            vpcId := state.Get("vpcid").(string)
            describeSecurityGroupsRequest.VpcId = vpcId
        }

        securityGroupsResponse, err := client.DescribeSecurityGroups(describeSecurityGroupsRequest)
        if err != nil {
            return halt(state, err, "Failed querying security group")
        }

        securityGroupItems := securityGroupsResponse.SecurityGroups.SecurityGroup
        for _, securityGroupItem := range securityGroupItems {
            if securityGroupItem.SecurityGroupId == s.SecurityGroupId {
                state.Put("securitygroupid", s.SecurityGroupId)
                s.isCreate = false
                return multistep.ActionContinue
            }
        }

        s.isCreate = false
        err = fmt.Errorf("The specified security group {%s} doesn't exist.", s.SecurityGroupId)
        return halt(state, err, "")
    }

    ui.Say("Creating security group...")

    createSecurityGroupRequest := s.buildCreateSecurityGroupRequest(state)
    securityGroupResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.CreateSecurityGroup(createSecurityGroupRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(createSecurityGroupRetryErrors, EvalRetryErrorType),
    })

    if err != nil {
        return halt(state, err, "Failed creating security group")
    }

    securityGroupId := securityGroupResponse.(*ecs.CreateSecurityGroupResponse).SecurityGroupId

    ui.Message(fmt.Sprintf("Created security group: %s", securityGroupId))
    state.Put("securitygroupid", securityGroupId)
    s.isCreate = true
    s.SecurityGroupId = securityGroupId

    authorizeSecurityGroupEgressRequest := ecs.CreateAuthorizeSecurityGroupEgressRequest()
    authorizeSecurityGroupEgressRequest.SecurityGroupId = securityGroupId
    authorizeSecurityGroupEgressRequest.RegionId = s.RegionId
    authorizeSecurityGroupEgressRequest.IpProtocol = IpProtocolAll
    authorizeSecurityGroupEgressRequest.PortRange = DefaultPortRange
    authorizeSecurityGroupEgressRequest.NicType = NicTypeInternet
    authorizeSecurityGroupEgressRequest.DestCidrIp = DefaultCidrIp

    if _, err := client.AuthorizeSecurityGroupEgress(authorizeSecurityGroupEgressRequest); err != nil {
        return halt(state, err, "Failed authorizing security group")
    }

    authorizeSecurityGroupRequest := ecs.CreateAuthorizeSecurityGroupRequest()
    authorizeSecurityGroupRequest.SecurityGroupId = securityGroupId
    authorizeSecurityGroupRequest.RegionId = s.RegionId
    authorizeSecurityGroupRequest.IpProtocol = IpProtocolAll
    authorizeSecurityGroupRequest.PortRange = DefaultPortRange
    authorizeSecurityGroupRequest.NicType = NicTypeInternet
    authorizeSecurityGroupRequest.SourceCidrIp = DefaultCidrIp

    if _, err := client.AuthorizeSecurityGroup(authorizeSecurityGroupRequest); err != nil {
        return halt(state, err, "Failed authorizing security group")
    }

    return multistep.ActionContinue
}

func (s *stepConfigAlicloudSecurityGroup) Cleanup(state multistep.StateBag) {
    if !s.isCreate {
        return
    }

    cleanUpMessage(state, "security group")

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateDeleteSecurityGroupRequest()
            request.RegionId = s.RegionId
            request.SecurityGroupId = s.SecurityGroupId
            return client.DeleteSecurityGroup(request)
        },
        EvalFunc:   client.EvalCouldRetryResponse(deleteSecurityGroupRetryErrors, EvalRetryErrorType),
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))
    }
}

func (s *stepConfigAlicloudSecurityGroup) buildCreateSecurityGroupRequest(state multistep.StateBag) *ecs.CreateSecurityGroupRequest {
    networkType := state.Get("networktype").(InstanceNetWork)

    request := ecs.CreateCreateSecurityGroupRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.RegionId = s.RegionId
    request.SecurityGroupName = s.SecurityGroupName

    if networkType == InstanceNetworkVpc {
        vpcId := state.Get("vpcid").(string)
        request.VpcId = vpcId
    }

    return request
}
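The two authorize calls above open ingress and egress completely for the duration of the build. The constants they rely on live elsewhere in the package, presumably along these lines (values are an assumption, not part of this diff):

    // Assumed package-level constants (not part of this diff):
    const (
        IpProtocolAll    = "all"       // every protocol
        DefaultPortRange = "-1/-1"     // every port
        NicTypeInternet  = "internet"
        DefaultCidrIp    = "0.0.0.0/0" // any source/destination
    )
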
148  builder/alicloud/ecs/step_config_vpc.go  Normal file
@ -0,0 +1,148 @@
package ecs

import (
    "context"
    errorsNew "errors"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"
)

type stepConfigAlicloudVPC struct {
    VpcId     string
    CidrBlock string // 192.168.0.0/16 or 172.16.0.0/16 (default)
    VpcName   string
    isCreate  bool
}

var createVpcRetryErrors = []string{
    "TOKEN_PROCESSING",
}

var deleteVpcRetryErrors = []string{
    "DependencyViolation.Instance",
    "DependencyViolation.RouteEntry",
    "DependencyViolation.VSwitch",
    "DependencyViolation.SecurityGroup",
    "Forbbiden", // sic — kept as-is; presumably matches the code the API actually returns
    "TaskConflict",
}

func (s *stepConfigAlicloudVPC) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    if len(s.VpcId) != 0 {
        describeVpcsRequest := ecs.CreateDescribeVpcsRequest()
        describeVpcsRequest.VpcId = s.VpcId
        describeVpcsRequest.RegionId = config.AlicloudRegion

        vpcsResponse, err := client.DescribeVpcs(describeVpcsRequest)
        if err != nil {
            return halt(state, err, "Failed querying vpcs")
        }

        vpcs := vpcsResponse.Vpcs.Vpc
        if len(vpcs) > 0 {
            state.Put("vpcid", vpcs[0].VpcId)
            s.isCreate = false
            return multistep.ActionContinue
        }

        message := fmt.Sprintf("The specified vpc {%s} doesn't exist.", s.VpcId)
        return halt(state, errorsNew.New(message), "")
    }

    ui.Say("Creating vpc...")

    createVpcRequest := s.buildCreateVpcRequest(state)
    createVpcResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.CreateVpc(createVpcRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(createVpcRetryErrors, EvalRetryErrorType),
    })
    if err != nil {
        return halt(state, err, "Failed creating vpc")
    }

    vpcId := createVpcResponse.(*ecs.CreateVpcResponse).VpcId
    _, err = client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateDescribeVpcsRequest()
            request.RegionId = config.AlicloudRegion
            request.VpcId = vpcId
            return client.DescribeVpcs(request)
        },
        EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
            if err != nil {
                return WaitForExpectToRetry
            }

            vpcsResponse := response.(*ecs.DescribeVpcsResponse)
            vpcs := vpcsResponse.Vpcs.Vpc
            if len(vpcs) > 0 {
                for _, vpc := range vpcs {
                    if vpc.Status == VpcStatusAvailable {
                        return WaitForExpectSuccess
                    }
                }
            }

            return WaitForExpectToRetry
        },
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        return halt(state, err, "Failed waiting for vpc to become available")
    }

    ui.Message(fmt.Sprintf("Created vpc: %s", vpcId))
    state.Put("vpcid", vpcId)
    s.isCreate = true
    s.VpcId = vpcId
    return multistep.ActionContinue
}

func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) {
    if !s.isCreate {
        return
    }

    cleanUpMessage(state, "VPC")

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateDeleteVpcRequest()
            request.VpcId = s.VpcId
            return client.DeleteVpc(request)
        },
        EvalFunc:   client.EvalCouldRetryResponse(deleteVpcRetryErrors, EvalRetryErrorType),
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        ui.Error(fmt.Sprintf("Error deleting vpc, it may still be around: %s", err))
    }
}

func (s *stepConfigAlicloudVPC) buildCreateVpcRequest(state multistep.StateBag) *ecs.CreateVpcRequest {
    config := state.Get("config").(*Config)

    request := ecs.CreateCreateVpcRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.RegionId = config.AlicloudRegion
    request.CidrBlock = s.CidrBlock
    request.VpcName = s.VpcName

    return request
}
207  builder/alicloud/ecs/step_config_vswitch.go  Normal file
@ -0,0 +1,207 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"
)

type stepConfigAlicloudVSwitch struct {
    VSwitchId   string
    ZoneId      string
    isCreate    bool
    CidrBlock   string
    VSwitchName string
}

var createVSwitchRetryErrors = []string{
    "TOKEN_PROCESSING",
}

var deleteVSwitchRetryErrors = []string{
    "IncorrectVSwitchStatus",
    "DependencyViolation",
    "DependencyViolation.HaVip",
    "IncorrectRouteEntryStatus",
    "TaskConflict",
}

func (s *stepConfigAlicloudVSwitch) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    vpcId := state.Get("vpcid").(string)
    config := state.Get("config").(*Config)

    if len(s.VSwitchId) != 0 {
        describeVSwitchesRequest := ecs.CreateDescribeVSwitchesRequest()
        describeVSwitchesRequest.VpcId = vpcId
        describeVSwitchesRequest.VSwitchId = s.VSwitchId
        describeVSwitchesRequest.ZoneId = s.ZoneId

        vswitchesResponse, err := client.DescribeVSwitches(describeVSwitchesRequest)
        if err != nil {
            return halt(state, err, "Failed querying vswitch")
        }

        vswitch := vswitchesResponse.VSwitches.VSwitch
        if len(vswitch) > 0 {
            state.Put("vswitchid", vswitch[0].VSwitchId)
            s.isCreate = false
            return multistep.ActionContinue
        }

        s.isCreate = false
        return halt(state, fmt.Errorf("The specified vswitch {%s} doesn't exist.", s.VSwitchId), "")
    }

    if s.ZoneId == "" {
        describeZonesRequest := ecs.CreateDescribeZonesRequest()
        describeZonesRequest.RegionId = config.AlicloudRegion

        zonesResponse, err := client.DescribeZones(describeZonesRequest)
        if err != nil {
            return halt(state, err, "Query for available zones failed")
        }

        var instanceTypes []string
        zones := zonesResponse.Zones.Zone
        for _, zone := range zones {
            isVSwitchSupported := false
            for _, resourceType := range zone.AvailableResourceCreation.ResourceTypes {
                if resourceType == "VSwitch" {
                    isVSwitchSupported = true
                }
            }

            if isVSwitchSupported {
                for _, instanceType := range zone.AvailableInstanceTypes.InstanceTypes {
                    if instanceType == config.InstanceType {
                        s.ZoneId = zone.ZoneId
                        break
                    }
                    instanceTypes = append(instanceTypes, instanceType)
                }
            }
        }

        if s.ZoneId == "" {
            if len(instanceTypes) > 0 {
                ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
                    "\n You can either change the instance to one of the following: %v \n"+
                    "or choose another region.", config.InstanceType, instanceTypes))

                state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
                    "\n You can either change the instance to one of the following: %v \n"+
                    "or choose another region.", config.InstanceType, instanceTypes))
                return multistep.ActionHalt
            } else {
                ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
                    "\n You can change to another region.", config.InstanceType))

                state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
                    "\n You can change to another region.", config.InstanceType))
                return multistep.ActionHalt
            }
        }
    }

    if config.CidrBlock == "" {
        s.CidrBlock = DefaultCidrBlock // use the default CidrBlock
    }

    ui.Say("Creating vswitch...")

    createVSwitchRequest := s.buildCreateVSwitchRequest(state)
    createVSwitchResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.CreateVSwitch(createVSwitchRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(createVSwitchRetryErrors, EvalRetryErrorType),
    })
    if err != nil {
        return halt(state, err, "Error creating vswitch")
    }

    vSwitchId := createVSwitchResponse.(*ecs.CreateVSwitchResponse).VSwitchId

    describeVSwitchesRequest := ecs.CreateDescribeVSwitchesRequest()
    describeVSwitchesRequest.VpcId = vpcId
    describeVSwitchesRequest.VSwitchId = vSwitchId

    _, err = client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.DescribeVSwitches(describeVSwitchesRequest)
        },
        EvalFunc: func(response responses.AcsResponse, err error) WaitForExpectEvalResult {
            if err != nil {
                return WaitForExpectToRetry
            }

            vSwitchesResponse := response.(*ecs.DescribeVSwitchesResponse)
            vSwitches := vSwitchesResponse.VSwitches.VSwitch
            if len(vSwitches) > 0 {
                for _, vSwitch := range vSwitches {
                    if vSwitch.Status == VSwitchStatusAvailable {
                        return WaitForExpectSuccess
                    }
                }
            }

            return WaitForExpectToRetry
        },
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        return halt(state, err, "Timeout waiting for vswitch to become available")
    }

    ui.Message(fmt.Sprintf("Created vswitch: %s", vSwitchId))
    state.Put("vswitchid", vSwitchId)
    s.isCreate = true
    s.VSwitchId = vSwitchId
    return multistep.ActionContinue
}

func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
    if !s.isCreate {
        return
    }

    cleanUpMessage(state, "vSwitch")

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateDeleteVSwitchRequest()
            request.VSwitchId = s.VSwitchId
            return client.DeleteVSwitch(request)
        },
        EvalFunc:   client.EvalCouldRetryResponse(deleteVSwitchRetryErrors, EvalRetryErrorType),
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        ui.Error(fmt.Sprintf("Error deleting vswitch, it may still be around: %s", err))
    }
}

func (s *stepConfigAlicloudVSwitch) buildCreateVSwitchRequest(state multistep.StateBag) *ecs.CreateVSwitchRequest {
    vpcId := state.Get("vpcid").(string)

    request := ecs.CreateCreateVSwitchRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.CidrBlock = s.CidrBlock
    request.ZoneId = s.ZoneId
    request.VpcId = vpcId
    request.VSwitchName = s.VSwitchName

    return request
}
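When no zone is configured, Run scans DescribeZones for a zone that both allows VSwitch creation and offers the requested instance type. The selection test, extracted as a standalone predicate for clarity (an illustrative refactor; the ecs.Zone type name is assumed from the SDK):

    // Illustrative refactor of the zone-selection check in Run above.
    func zoneSupports(zone ecs.Zone, instanceType string) bool {
        supportsVSwitch := false
        for _, rt := range zone.AvailableResourceCreation.ResourceTypes {
            if rt == "VSwitch" {
                supportsVSwitch = true
            }
        }
        if !supportsVSwitch {
            return false
        }
        for _, it := range zone.AvailableInstanceTypes.InstanceTypes {
            if it == instanceType {
                return true
            }
        }
        return false
    }
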
144  builder/alicloud/ecs/step_create_image.go  Normal file
@ -0,0 +1,144 @@
package ecs

import (
    "context"
    "fmt"
    "time"

    "github.com/hashicorp/packer/packer-plugin-sdk/random"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"
)

type stepCreateAlicloudImage struct {
    AlicloudImageIgnoreDataDisks bool
    WaitSnapshotReadyTimeout     int
    image                        *ecs.Image
}

var createImageRetryErrors = []string{
    "IdempotentProcessing",
}

func (s *stepCreateAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    tempImageName := config.AlicloudImageName
    if config.ImageEncrypted.True() {
        tempImageName = fmt.Sprintf("packer_%s", random.AlphaNum(7))
        ui.Say(fmt.Sprintf("Creating temporary image for encryption: %s", tempImageName))
    } else {
        ui.Say(fmt.Sprintf("Creating image: %s", tempImageName))
    }

    createImageRequest := s.buildCreateImageRequest(state, tempImageName)
    createImageResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.CreateImage(createImageRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(createImageRetryErrors, EvalRetryErrorType),
    })

    if err != nil {
        return halt(state, err, "Error creating image")
    }

    imageId := createImageResponse.(*ecs.CreateImageResponse).ImageId

    imagesResponse, err := client.WaitForImageStatus(config.AlicloudRegion, imageId, ImageStatusAvailable, time.Duration(s.WaitSnapshotReadyTimeout)*time.Second)

    // save the image first so Cleanup can delete it even if the wait timed out
    images := imagesResponse.(*ecs.DescribeImagesResponse).Images.Image
    if len(images) == 0 {
        return halt(state, err, "Unable to find created image")
    }
    s.image = &images[0]

    if err != nil {
        return halt(state, err, "Timeout waiting for image to be created")
    }

    var snapshotIds []string
    for _, device := range images[0].DiskDeviceMappings.DiskDeviceMapping {
        snapshotIds = append(snapshotIds, device.SnapshotId)
    }

    state.Put("alicloudimage", imageId)
    state.Put("alicloudsnapshots", snapshotIds)

    alicloudImages := make(map[string]string)
    alicloudImages[config.AlicloudRegion] = images[0].ImageId
    state.Put("alicloudimages", alicloudImages)

    return multistep.ActionContinue
}

func (s *stepCreateAlicloudImage) Cleanup(state multistep.StateBag) {
    if s.image == nil {
        return
    }

    config := state.Get("config").(*Config)
    encryptedSet := config.ImageEncrypted.True()

    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

    if !cancelled && !halted && !encryptedSet {
        return
    }

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    if !cancelled && !halted && encryptedSet {
        ui.Say(fmt.Sprintf("Deleting temporary image %s(%s) and related snapshots after finishing encryption...", s.image.ImageId, s.image.ImageName))
    } else {
        ui.Say("Deleting the image and related snapshots because of cancellation or error...")
    }

    deleteImageRequest := ecs.CreateDeleteImageRequest()
    deleteImageRequest.RegionId = config.AlicloudRegion
    deleteImageRequest.ImageId = s.image.ImageId
    if _, err := client.DeleteImage(deleteImageRequest); err != nil {
        ui.Error(fmt.Sprintf("Error deleting image, it may still be around: %s", err))
        return
    }

    // Delete the snapshots of this image
    for _, diskDevices := range s.image.DiskDeviceMappings.DiskDeviceMapping {
        deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
        deleteSnapshotRequest.SnapshotId = diskDevices.SnapshotId
        if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
            ui.Error(fmt.Sprintf("Error deleting snapshot, it may still be around: %s", err))
            return
        }
    }
}

func (s *stepCreateAlicloudImage) buildCreateImageRequest(state multistep.StateBag, imageName string) *ecs.CreateImageRequest {
    config := state.Get("config").(*Config)

    request := ecs.CreateCreateImageRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.RegionId = config.AlicloudRegion
    request.ImageName = imageName
    request.ImageVersion = config.AlicloudImageVersion
    request.Description = config.AlicloudImageDescription

    if s.AlicloudImageIgnoreDataDisks {
        snapshotId := state.Get("alicloudsnapshot").(string)
        request.SnapshotId = snapshotId
    } else {
        instance := state.Get("instance").(*ecs.Instance)
        request.InstanceId = instance.InstanceId
    }

    return request
}
208  builder/alicloud/ecs/step_create_instance.go  Normal file
@ -0,0 +1,208 @@
package ecs

import (
    "context"
    "encoding/base64"
    "fmt"
    "io/ioutil"
    "strconv"

    "github.com/hashicorp/packer/packer-plugin-sdk/uuid"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    confighelper "github.com/hashicorp/packer/packer-plugin-sdk/config"
)

type stepCreateAlicloudInstance struct {
    IOOptimized             confighelper.Trilean
    InstanceType            string
    UserData                string
    UserDataFile            string
    instanceId              string
    RegionId                string
    InternetChargeType      string
    InternetMaxBandwidthOut int
    InstanceName            string
    ZoneId                  string
    instance                *ecs.Instance
}

var createInstanceRetryErrors = []string{
    "IdempotentProcessing",
}

var deleteInstanceRetryErrors = []string{
    "IncorrectInstanceStatus.Initializing",
}

func (s *stepCreateAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    ui.Say("Creating instance...")
    createInstanceRequest, err := s.buildCreateInstanceRequest(state)
    if err != nil {
        return halt(state, err, "")
    }

    createInstanceResponse, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            return client.CreateInstance(createInstanceRequest)
        },
        EvalFunc: client.EvalCouldRetryResponse(createInstanceRetryErrors, EvalRetryErrorType),
    })

    if err != nil {
        return halt(state, err, "Error creating instance")
    }

    instanceId := createInstanceResponse.(*ecs.CreateInstanceResponse).InstanceId

    _, err = client.WaitForInstanceStatus(s.RegionId, instanceId, InstanceStatusStopped)
    if err != nil {
        return halt(state, err, "Error waiting for instance to be created")
    }

    describeInstancesRequest := ecs.CreateDescribeInstancesRequest()
    describeInstancesRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instanceId)
    instances, err := client.DescribeInstances(describeInstancesRequest)
    if err != nil {
        return halt(state, err, "")
    }

    ui.Message(fmt.Sprintf("Created instance: %s", instanceId))
    s.instance = &instances.Instances.Instance[0]
    state.Put("instance", s.instance)
    // instance_id is the generic term used so that users can have access to the
    // instance id inside of the provisioners, used in step_provision.
    state.Put("instance_id", instanceId)

    return multistep.ActionContinue
}

func (s *stepCreateAlicloudInstance) Cleanup(state multistep.StateBag) {
    if s.instance == nil {
        return
    }
    cleanUpMessage(state, "instance")

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    _, err := client.WaitForExpected(&WaitForExpectArgs{
        RequestFunc: func() (responses.AcsResponse, error) {
            request := ecs.CreateDeleteInstanceRequest()
            request.InstanceId = s.instance.InstanceId
            request.Force = requests.NewBoolean(true)
            return client.DeleteInstance(request)
        },
        EvalFunc:   client.EvalCouldRetryResponse(deleteInstanceRetryErrors, EvalRetryErrorType),
        RetryTimes: shortRetryTimes,
    })

    if err != nil {
        ui.Say(fmt.Sprintf("Failed to clean up instance %s: %s", s.instance.InstanceId, err))
    }
}

func (s *stepCreateAlicloudInstance) buildCreateInstanceRequest(state multistep.StateBag) (*ecs.CreateInstanceRequest, error) {
    request := ecs.CreateCreateInstanceRequest()
    request.ClientToken = uuid.TimeOrderedUUID()
    request.RegionId = s.RegionId
    request.InstanceType = s.InstanceType
    request.InstanceName = s.InstanceName
    request.ZoneId = s.ZoneId

    sourceImage := state.Get("source_image").(*ecs.Image)
    request.ImageId = sourceImage.ImageId

    securityGroupId := state.Get("securitygroupid").(string)
    request.SecurityGroupId = securityGroupId

    networkType := state.Get("networktype").(InstanceNetWork)
    if networkType == InstanceNetworkVpc {
        vswitchId := state.Get("vswitchid").(string)
        request.VSwitchId = vswitchId

        userData, err := s.getUserData(state)
        if err != nil {
            return nil, err
        }

        request.UserData = userData
    } else {
        if s.InternetChargeType == "" {
            s.InternetChargeType = "PayByTraffic"
        }

        if s.InternetMaxBandwidthOut == 0 {
            s.InternetMaxBandwidthOut = 5
        }
    }
    request.InternetChargeType = s.InternetChargeType
    request.InternetMaxBandwidthOut = requests.Integer(convertNumber(s.InternetMaxBandwidthOut))

    if s.IOOptimized.True() {
        request.IoOptimized = IOOptimizedOptimized
    } else if s.IOOptimized.False() {
        request.IoOptimized = IOOptimizedNone
    }

    config := state.Get("config").(*Config)
    password := config.Comm.SSHPassword
    if password == "" && config.Comm.WinRMPassword != "" {
        password = config.Comm.WinRMPassword
    }
    request.Password = password

    systemDisk := config.AlicloudImageConfig.ECSSystemDiskMapping
    request.SystemDiskDiskName = systemDisk.DiskName
    request.SystemDiskCategory = systemDisk.DiskCategory
    request.SystemDiskSize = requests.Integer(convertNumber(systemDisk.DiskSize))
    request.SystemDiskDescription = systemDisk.Description

    imageDisks := config.AlicloudImageConfig.ECSImagesDiskMappings
    var dataDisks []ecs.CreateInstanceDataDisk
    for _, imageDisk := range imageDisks {
        var dataDisk ecs.CreateInstanceDataDisk
        dataDisk.DiskName = imageDisk.DiskName
        dataDisk.Category = imageDisk.DiskCategory
        dataDisk.Size = string(convertNumber(imageDisk.DiskSize))
        dataDisk.SnapshotId = imageDisk.SnapshotId
        dataDisk.Description = imageDisk.Description
        dataDisk.DeleteWithInstance = strconv.FormatBool(imageDisk.DeleteWithInstance)
        dataDisk.Device = imageDisk.Device
        if imageDisk.Encrypted != confighelper.TriUnset {
            dataDisk.Encrypted = strconv.FormatBool(imageDisk.Encrypted.True())
        }

        dataDisks = append(dataDisks, dataDisk)
    }
    request.DataDisk = &dataDisks

    return request, nil
}

func (s *stepCreateAlicloudInstance) getUserData(state multistep.StateBag) (string, error) {
    userData := s.UserData

    if s.UserDataFile != "" {
        data, err := ioutil.ReadFile(s.UserDataFile)
        if err != nil {
            return "", err
        }

        userData = string(data)
    }

    if userData != "" {
        userData = base64.StdEncoding.EncodeToString([]byte(userData))
    }

    return userData, nil
}
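getUserData gives the file contents precedence over the inline user_data string and standard-base64-encodes whatever it ends up with. A worked example:

    // "#!/bin/sh\necho hello" encodes to "IyEvYmluL3NoCmVjaG8gaGVsbG8="
    enc := base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\necho hello"))
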
90  builder/alicloud/ecs/step_create_snapshot.go  Normal file
@ -0,0 +1,90 @@
package ecs

import (
    "context"
    "fmt"
    "time"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepCreateAlicloudSnapshot struct {
    snapshot                 *ecs.Snapshot
    WaitSnapshotReadyTimeout int
}

func (s *stepCreateAlicloudSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    instance := state.Get("instance").(*ecs.Instance)

    describeDisksRequest := ecs.CreateDescribeDisksRequest()
    describeDisksRequest.RegionId = config.AlicloudRegion
    describeDisksRequest.InstanceId = instance.InstanceId
    describeDisksRequest.DiskType = DiskTypeSystem
    disksResponse, err := client.DescribeDisks(describeDisksRequest)
    if err != nil {
        return halt(state, err, "Error describe disks")
    }

    disks := disksResponse.Disks.Disk
    if len(disks) == 0 {
        return halt(state, err, "Unable to find system disk of instance")
    }

    createSnapshotRequest := ecs.CreateCreateSnapshotRequest()
    createSnapshotRequest.DiskId = disks[0].DiskId
    snapshot, err := client.CreateSnapshot(createSnapshotRequest)
    if err != nil {
        return halt(state, err, "Error creating snapshot")
    }

    // Create the alicloud snapshot
    ui.Say(fmt.Sprintf("Creating snapshot from system disk %s: %s", disks[0].DiskId, snapshot.SnapshotId))

    snapshotsResponse, err := client.WaitForSnapshotStatus(config.AlicloudRegion, snapshot.SnapshotId, SnapshotStatusAccomplished, time.Duration(s.WaitSnapshotReadyTimeout)*time.Second)
    if err != nil {
        _, ok := err.(errors.Error)
        if ok {
            return halt(state, err, "Error querying created snapshot")
        }

        return halt(state, err, "Timeout waiting for snapshot to be created")
    }

    snapshots := snapshotsResponse.(*ecs.DescribeSnapshotsResponse).Snapshots.Snapshot
    if len(snapshots) == 0 {
        return halt(state, err, "Unable to find created snapshot")
    }

    s.snapshot = &snapshots[0]
    state.Put("alicloudsnapshot", snapshot.SnapshotId)
    return multistep.ActionContinue
}

func (s *stepCreateAlicloudSnapshot) Cleanup(state multistep.StateBag) {
    if s.snapshot == nil {
        return
    }
    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)
    if !cancelled && !halted {
        return
    }

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    ui.Say("Deleting the snapshot because of cancellation or error...")

    deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
    deleteSnapshotRequest.SnapshotId = s.snapshot.SnapshotId
    if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
        ui.Error(fmt.Sprintf("Error deleting snapshot, it may still be around: %s", err))
        return
    }
}
65  builder/alicloud/ecs/step_create_tags.go  Normal file
@ -0,0 +1,65 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepCreateTags struct {
    Tags map[string]string
}

func (s *stepCreateTags) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    imageId := state.Get("alicloudimage").(string)
    snapshotIds := state.Get("alicloudsnapshots").([]string)

    if len(s.Tags) == 0 {
        return multistep.ActionContinue
    }

    ui.Say(fmt.Sprintf("Adding tags(%s) to image: %s", s.Tags, imageId))

    var tags []ecs.AddTagsTag
    for key, value := range s.Tags {
        var tag ecs.AddTagsTag
        tag.Key = key
        tag.Value = value
        tags = append(tags, tag)
    }

    addTagsRequest := ecs.CreateAddTagsRequest()
    addTagsRequest.RegionId = config.AlicloudRegion
    addTagsRequest.ResourceId = imageId
    addTagsRequest.ResourceType = TagResourceImage
    addTagsRequest.Tag = &tags

    if _, err := client.AddTags(addTagsRequest); err != nil {
        return halt(state, err, "Error Adding tags to image")
    }

    for _, snapshotId := range snapshotIds {
        ui.Say(fmt.Sprintf("Adding tags(%s) to snapshot: %s", s.Tags, snapshotId))
        addTagsRequest := ecs.CreateAddTagsRequest()
        addTagsRequest.RegionId = config.AlicloudRegion
        addTagsRequest.ResourceId = snapshotId
        addTagsRequest.ResourceType = TagResourceSnapshot
        addTagsRequest.Tag = &tags

        if _, err := client.AddTags(addTagsRequest); err != nil {
            return halt(state, err, "Error Adding tags to snapshot")
        }
    }

    return multistep.ActionContinue
}

func (s *stepCreateTags) Cleanup(state multistep.StateBag) {
    // Nothing to do: tags are removed when the tagged resources are cleaned up.
}
101  builder/alicloud/ecs/step_delete_images_snapshots.go  Normal file
@ -0,0 +1,101 @@
package ecs

import (
    "context"
    "fmt"
    "log"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepDeleteAlicloudImageSnapshots struct {
    AlicloudImageForceDelete          bool
    AlicloudImageForceDeleteSnapshots bool
    AlicloudImageName                 string
    AlicloudImageDestinationRegions   []string
    AlicloudImageDestinationNames     []string
}

func (s *stepDeleteAlicloudImageSnapshots) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)

    // Check for force delete
    if s.AlicloudImageForceDelete {
        err := s.deleteImageAndSnapshots(state, s.AlicloudImageName, config.AlicloudRegion)
        if err != nil {
            return halt(state, err, "")
        }

        numberOfName := len(s.AlicloudImageDestinationNames)
        if numberOfName == 0 {
            return multistep.ActionContinue
        }

        for index, destinationRegion := range s.AlicloudImageDestinationRegions {
            if destinationRegion == config.AlicloudRegion {
                continue
            }

            if index < numberOfName {
                err = s.deleteImageAndSnapshots(state, s.AlicloudImageDestinationNames[index], destinationRegion)
                if err != nil {
                    return halt(state, err, "")
                }
            } else {
                break
            }
        }
    }

    return multistep.ActionContinue
}

func (s *stepDeleteAlicloudImageSnapshots) deleteImageAndSnapshots(state multistep.StateBag, imageName string, region string) error {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    describeImagesRequest := ecs.CreateDescribeImagesRequest()
    describeImagesRequest.RegionId = region
    describeImagesRequest.ImageName = imageName
    describeImagesRequest.Status = ImageStatusQueried
    imageResponse, _ := client.DescribeImages(describeImagesRequest)
    images := imageResponse.Images.Image
    if len(images) < 1 {
        return nil
    }

    ui.Say(fmt.Sprintf("Deleting duplicated image and snapshot in %s: %s", region, imageName))

    for _, image := range images {
        if image.ImageOwnerAlias != ImageOwnerSelf {
            log.Printf("You cannot delete non-customized images: %s ", image.ImageId)
            continue
        }

        deleteImageRequest := ecs.CreateDeleteImageRequest()
        deleteImageRequest.RegionId = region
        deleteImageRequest.ImageId = image.ImageId
        if _, err := client.DeleteImage(deleteImageRequest); err != nil {
            err := fmt.Errorf("Failed to delete image: %s", err)
            return err
        }

        if s.AlicloudImageForceDeleteSnapshots {
            for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
                deleteSnapshotRequest := ecs.CreateDeleteSnapshotRequest()
                deleteSnapshotRequest.SnapshotId = diskDevice.SnapshotId
                if _, err := client.DeleteSnapshot(deleteSnapshotRequest); err != nil {
                    err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
                    return err
                }
            }
        }
    }

    return nil
}

func (s *stepDeleteAlicloudImageSnapshots) Cleanup(state multistep.StateBag) {
}
builder/alicloud/ecs/step_pre_validate.go (Normal file, 87 lines)
@@ -0,0 +1,87 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepPreValidate struct {
    AlicloudDestImageName string
    ForceDelete           bool
}

func (s *stepPreValidate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    if err := s.validateRegions(state); err != nil {
        return halt(state, err, "")
    }

    if err := s.validateDestImageName(state); err != nil {
        return halt(state, err, "")
    }

    return multistep.ActionContinue
}

func (s *stepPreValidate) validateRegions(state multistep.StateBag) error {
    ui := state.Get("ui").(packer.Ui)
    config := state.Get("config").(*Config)

    if config.AlicloudSkipValidation {
        ui.Say("Skip region validation flag found, skipping prevalidating source region and copied regions.")
        return nil
    }

    ui.Say("Prevalidating source region and copied regions...")

    var errs *packer.MultiError
    if err := config.ValidateRegion(config.AlicloudRegion); err != nil {
        errs = packer.MultiErrorAppend(errs, err)
    }
    for _, region := range config.AlicloudImageDestinationRegions {
        if err := config.ValidateRegion(region); err != nil {
            errs = packer.MultiErrorAppend(errs, err)
        }
    }

    if errs != nil && len(errs.Errors) > 0 {
        return errs
    }

    return nil
}

func (s *stepPreValidate) validateDestImageName(state multistep.StateBag) error {
    ui := state.Get("ui").(packer.Ui)
    client := state.Get("client").(*ClientWrapper)
    config := state.Get("config").(*Config)

    if s.ForceDelete {
        ui.Say("Force delete flag found, skipping prevalidating image name.")
        return nil
    }

    ui.Say("Prevalidating image name...")

    describeImagesRequest := ecs.CreateDescribeImagesRequest()
    describeImagesRequest.RegionId = config.AlicloudRegion
    describeImagesRequest.ImageName = s.AlicloudDestImageName
    describeImagesRequest.Status = ImageStatusQueried

    imagesResponse, err := client.DescribeImages(describeImagesRequest)
    if err != nil {
        return fmt.Errorf("Error querying alicloud image: %s", err)
    }

    images := imagesResponse.Images.Image
    if len(images) > 0 {
        return fmt.Errorf("Error: Image Name: '%s' is used by an existing alicloud image: %s", images[0].ImageName, images[0].ImageId)
    }

    return nil
}

func (s *stepPreValidate) Cleanup(multistep.StateBag) {}
builder/alicloud/ecs/step_region_copy_image.go (Normal file, 106 lines)
@@ -0,0 +1,106 @@
package ecs

import (
    "context"
    "fmt"
    "time"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    confighelper "github.com/hashicorp/packer/packer-plugin-sdk/config"
)

type stepRegionCopyAlicloudImage struct {
    AlicloudImageDestinationRegions []string
    AlicloudImageDestinationNames   []string
    RegionId                        string
}

func (s *stepRegionCopyAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)

    if config.ImageEncrypted != confighelper.TriUnset {
        s.AlicloudImageDestinationRegions = append(s.AlicloudImageDestinationRegions, s.RegionId)
        s.AlicloudImageDestinationNames = append(s.AlicloudImageDestinationNames, config.AlicloudImageName)
    }

    if len(s.AlicloudImageDestinationRegions) == 0 {
        return multistep.ActionContinue
    }

    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)

    srcImageId := state.Get("alicloudimage").(string)
    alicloudImages := state.Get("alicloudimages").(map[string]string)
    numberOfName := len(s.AlicloudImageDestinationNames)

    ui.Say(fmt.Sprintf("Copying image %s from %s...", srcImageId, s.RegionId))
    for index, destinationRegion := range s.AlicloudImageDestinationRegions {
        if destinationRegion == s.RegionId && config.ImageEncrypted == confighelper.TriUnset {
            continue
        }

        ecsImageName := ""
        if numberOfName > 0 && index < numberOfName {
            ecsImageName = s.AlicloudImageDestinationNames[index]
        }

        copyImageRequest := ecs.CreateCopyImageRequest()
        copyImageRequest.RegionId = s.RegionId
        copyImageRequest.ImageId = srcImageId
        copyImageRequest.DestinationRegionId = destinationRegion
        copyImageRequest.DestinationImageName = ecsImageName
        if config.ImageEncrypted != confighelper.TriUnset {
            copyImageRequest.Encrypted = requests.NewBoolean(config.ImageEncrypted.True())
        }

        imageResponse, err := client.CopyImage(copyImageRequest)
        if err != nil {
            return halt(state, err, "Error copying images")
        }

        alicloudImages[destinationRegion] = imageResponse.ImageId
        ui.Message(fmt.Sprintf("Copy image from %s(%s) to %s(%s)", s.RegionId, srcImageId, destinationRegion, imageResponse.ImageId))
    }

    if config.ImageEncrypted != confighelper.TriUnset {
        if _, err := client.WaitForImageStatus(s.RegionId, alicloudImages[s.RegionId], ImageStatusAvailable, time.Duration(ALICLOUD_DEFAULT_LONG_TIMEOUT)*time.Second); err != nil {
            return halt(state, err, fmt.Sprintf("Timeout waiting image %s finish copying", alicloudImages[s.RegionId]))
        }
    }

    return multistep.ActionContinue
}

func (s *stepRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) {
    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

    if !cancelled && !halted {
        return
    }

    ui := state.Get("ui").(packer.Ui)
    ui.Say("Stopping copy image because of cancellation or error...")

    client := state.Get("client").(*ClientWrapper)
    alicloudImages := state.Get("alicloudimages").(map[string]string)
    srcImageId := state.Get("alicloudimage").(string)

    for copiedRegionId, copiedImageId := range alicloudImages {
        if copiedImageId == srcImageId {
            continue
        }

        cancelCopyImageRequest := ecs.CreateCancelCopyImageRequest()
        cancelCopyImageRequest.RegionId = copiedRegionId
        cancelCopyImageRequest.ImageId = copiedImageId
        if _, err := client.CancelCopyImage(cancelCopyImageRequest); err != nil {
            ui.Error(fmt.Sprintf("Error cancelling copy image: %v", err))
        }
    }
}
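One subtlety worth calling out: ImageEncrypted is a three-valued setting, so the step can tell "not configured" apart from an explicit true or false. Whenever the user sets it either way, the source region is appended to the destination list so the image is re-copied in place with the requested encryption attribute, and the step then waits for that local copy to become available. A sketch of that guard, assuming the Trilean type that the packer-plugin-sdk config package (imported above as confighelper) provides behind TriUnset:

    // Sketch only (assumes confighelper.Trilean): TriUnset means "leave
    // Encrypted out of the CopyImage request entirely", while an explicit
    // true or false is forwarded to the API.
    func applyEncryption(req *ecs.CopyImageRequest, imageEncrypted confighelper.Trilean) {
        if imageEncrypted != confighelper.TriUnset {
            req.Encrypted = requests.NewBoolean(imageEncrypted.True())
        }
    }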
builder/alicloud/ecs/step_run_instance.go (Normal file, 72 lines)
@@ -0,0 +1,72 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepRunAlicloudInstance struct {
}

func (s *stepRunAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    ui := state.Get("ui").(packer.Ui)
    instance := state.Get("instance").(*ecs.Instance)

    startInstanceRequest := ecs.CreateStartInstanceRequest()
    startInstanceRequest.InstanceId = instance.InstanceId
    if _, err := client.StartInstance(startInstanceRequest); err != nil {
        return halt(state, err, "Error starting instance")
    }

    ui.Say(fmt.Sprintf("Starting instance: %s", instance.InstanceId))

    _, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusRunning)
    if err != nil {
        return halt(state, err, "Timeout waiting for instance to start")
    }

    return multistep.ActionContinue
}

func (s *stepRunAlicloudInstance) Cleanup(state multistep.StateBag) {
    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

    if !cancelled && !halted {
        return
    }

    ui := state.Get("ui").(packer.Ui)
    client := state.Get("client").(*ClientWrapper)
    instance := state.Get("instance").(*ecs.Instance)

    describeInstancesRequest := ecs.CreateDescribeInstancesRequest()
    describeInstancesRequest.InstanceIds = fmt.Sprintf("[\"%s\"]", instance.InstanceId)
    instancesResponse, _ := client.DescribeInstances(describeInstancesRequest)

    if len(instancesResponse.Instances.Instance) == 0 {
        return
    }

    instanceAttribute := instancesResponse.Instances.Instance[0]
    if instanceAttribute.Status == InstanceStatusStarting || instanceAttribute.Status == InstanceStatusRunning {
        stopInstanceRequest := ecs.CreateStopInstanceRequest()
        stopInstanceRequest.InstanceId = instance.InstanceId
        stopInstanceRequest.ForceStop = requests.NewBoolean(true)
        if _, err := client.StopInstance(stopInstanceRequest); err != nil {
            ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
            return
        }

        _, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusStopped)
        if err != nil {
            ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around %s", instance.InstanceId, err))
        }
    }
}
builder/alicloud/ecs/step_share_image.go (Normal file, 60 lines)
@@ -0,0 +1,60 @@
package ecs

import (
    "context"
    "fmt"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepShareAlicloudImage struct {
    AlicloudImageShareAccounts   []string
    AlicloudImageUNShareAccounts []string
    RegionId                     string
}

func (s *stepShareAlicloudImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    alicloudImages := state.Get("alicloudimages").(map[string]string)

    for regionId, imageId := range alicloudImages {
        modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
        modifyImageShareRequest.RegionId = regionId
        modifyImageShareRequest.ImageId = imageId
        modifyImageShareRequest.AddAccount = &s.AlicloudImageShareAccounts
        modifyImageShareRequest.RemoveAccount = &s.AlicloudImageUNShareAccounts

        if _, err := client.ModifyImageSharePermission(modifyImageShareRequest); err != nil {
            return halt(state, err, "Failed modifying image share permissions")
        }
    }
    return multistep.ActionContinue
}

func (s *stepShareAlicloudImage) Cleanup(state multistep.StateBag) {
    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

    if !cancelled && !halted {
        return
    }

    ui := state.Get("ui").(packer.Ui)
    client := state.Get("client").(*ClientWrapper)
    alicloudImages := state.Get("alicloudimages").(map[string]string)

    ui.Say("Restoring image share permission because of cancellation or error...")

    for regionId, imageId := range alicloudImages {
        modifyImageShareRequest := ecs.CreateModifyImageSharePermissionRequest()
        modifyImageShareRequest.RegionId = regionId
        modifyImageShareRequest.ImageId = imageId
        modifyImageShareRequest.AddAccount = &s.AlicloudImageUNShareAccounts
        modifyImageShareRequest.RemoveAccount = &s.AlicloudImageShareAccounts
        if _, err := client.ModifyImageSharePermission(modifyImageShareRequest); err != nil {
            ui.Say(fmt.Sprintf("Restoring image share permission failed: %s", err))
        }
    }
}
builder/alicloud/ecs/step_stop_instance.go (Normal file, 48 lines)
@@ -0,0 +1,48 @@
package ecs

import (
    "context"
    "fmt"
    "strconv"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"

    "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
)

type stepStopAlicloudInstance struct {
    ForceStop   bool
    DisableStop bool
}

func (s *stepStopAlicloudInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    client := state.Get("client").(*ClientWrapper)
    instance := state.Get("instance").(*ecs.Instance)
    ui := state.Get("ui").(packer.Ui)

    if !s.DisableStop {
        ui.Say(fmt.Sprintf("Stopping instance: %s", instance.InstanceId))

        stopInstanceRequest := ecs.CreateStopInstanceRequest()
        stopInstanceRequest.InstanceId = instance.InstanceId
        stopInstanceRequest.ForceStop = requests.Boolean(strconv.FormatBool(s.ForceStop))
        if _, err := client.StopInstance(stopInstanceRequest); err != nil {
            return halt(state, err, "Error stopping alicloud instance")
        }
    }

    ui.Say(fmt.Sprintf("Waiting instance stopped: %s", instance.InstanceId))

    _, err := client.WaitForInstanceStatus(instance.RegionId, instance.InstanceId, InstanceStatusStopped)
    if err != nil {
        return halt(state, err, "Error waiting for alicloud instance to stop")
    }

    return multistep.ActionContinue
}

func (s *stepStopAlicloudInstance) Cleanup(multistep.StateBag) {
    // No cleanup...
}
builder/alicloud/examples/basic/alicloud.json (Normal file, 25 lines)
@@ -0,0 +1,25 @@
{
    "variables": {
        "access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
        "secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
    },
    "builders": [{
        "type": "alicloud-ecs",
        "access_key": "{{user `access_key`}}",
        "secret_key": "{{user `secret_key`}}",
        "region": "cn-beijing",
        "image_name": "packer_basic",
        "source_image": "centos_7_03_64_20G_alibase_20170818.vhd",
        "ssh_username": "root",
        "instance_type": "ecs.n1.tiny",
        "internet_charge_type": "PayByTraffic",
        "io_optimized": "true"
    }],
    "provisioners": [{
        "type": "shell",
        "inline": [
            "sleep 30",
            "yum install redis.x86_64 -y"
        ]
    }]
}
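To try this template, export ALICLOUD_ACCESS_KEY and ALICLOUD_SECRET_KEY in the shell (the variables block reads both via env), then run packer validate and packer build against the saved file. The same flow applies to the other example templates in this changeset.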
builder/alicloud/examples/basic/alicloud_windows.json (Normal file, 27 lines)
@@ -0,0 +1,27 @@
{
    "variables": {
        "access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
        "secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
    },
    "builders": [{
        "type": "alicloud-ecs",
        "access_key": "{{user `access_key`}}",
        "secret_key": "{{user `secret_key`}}",
        "region": "cn-beijing",
        "image_name": "packer_test",
        "source_image": "winsvr_64_dtcC_1809_en-us_40G_alibase_20190318.vhd",
        "instance_type": "ecs.n1.tiny",
        "io_optimized": "true",
        "internet_charge_type": "PayByTraffic",
        "image_force_delete": "true",
        "communicator": "winrm",
        "winrm_port": 5985,
        "winrm_username": "Administrator",
        "winrm_password": "Test1234",
        "user_data_file": "examples/alicloud/basic/winrm_enable_userdata.ps1"
    }],
    "provisioners": [{
        "type": "powershell",
        "inline": ["dir c:\\"]
    }]
}
builder/alicloud/examples/basic/alicloud_with_data_disk.json (Normal file, 37 lines)
@@ -0,0 +1,37 @@
{
    "variables": {
        "access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
        "secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
    },
    "builders": [{
        "type": "alicloud-ecs",
        "access_key": "{{user `access_key`}}",
        "secret_key": "{{user `secret_key`}}",
        "region": "cn-beijing",
        "image_name": "packer_with_data_disk",
        "source_image": "centos_7_03_64_20G_alibase_20170818.vhd",
        "ssh_username": "root",
        "instance_type": "ecs.n1.tiny",
        "internet_charge_type": "PayByTraffic",
        "io_optimized": "true",
        "image_disk_mappings": [
            {
                "disk_name": "data1",
                "disk_size": 20,
                "disk_delete_with_instance": true
            }, {
                "disk_name": "data2",
                "disk_size": 20,
                "disk_device": "/dev/xvdz",
                "disk_delete_with_instance": true
            }
        ]
    }],
    "provisioners": [{
        "type": "shell",
        "inline": [
            "sleep 30",
            "yum install redis.x86_64 -y"
        ]
    }]
}
builder/alicloud/examples/basic/winrm_enable_userdata.ps1 (Normal file, 26 lines)
@@ -0,0 +1,26 @@
#powershell
write-output "Running User Data Script"
write-host "(host) Running User Data Script"
Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore
# Don't set this before Set-ExecutionPolicy as it throws an error
$ErrorActionPreference = "stop"
# Remove HTTP listener
Remove-Item -Path WSMan:\Localhost\listener\listener* -Recurse
# WinRM
write-output "Setting up WinRM"
write-host "(host) setting up WinRM"
cmd.exe /c winrm quickconfig -q
cmd.exe /c winrm quickconfig '-transport:http'
cmd.exe /c winrm set "winrm/config" '@{MaxTimeoutms="1800000"}'
cmd.exe /c winrm set "winrm/config/winrs" '@{MaxMemoryPerShellMB="10240"}'
cmd.exe /c winrm set "winrm/config/service" '@{AllowUnencrypted="true"}'
cmd.exe /c winrm set "winrm/config/client" '@{AllowUnencrypted="true"}'
cmd.exe /c winrm set "winrm/config/service/auth" '@{Basic="true"}'
cmd.exe /c winrm set "winrm/config/client/auth" '@{Basic="true"}'
cmd.exe /c winrm set "winrm/config/service/auth" '@{CredSSP="true"}'
cmd.exe /c winrm set "winrm/config/listener?Address=*+Transport=HTTP" '@{Port="5985"}'
cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes
cmd.exe /c netsh firewall add portopening TCP 5985 "Port 5985"
cmd.exe /c net stop winrm
cmd.exe /c sc config winrm start= auto
cmd.exe /c net start winrm
builder/alicloud/examples/chef/alicloud.json (Normal file, 34 lines)
@@ -0,0 +1,34 @@
{
    "variables": {
        "access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
        "secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
    },
    "builders": [{
        "type": "alicloud-ecs",
        "access_key": "{{user `access_key`}}",
        "secret_key": "{{user `secret_key`}}",
        "region": "cn-beijing",
        "image_name": "packer_chef2",
        "source_image": "ubuntu_18_04_64_20G_alibase_20190223.vhd",
        "ssh_username": "root",
        "instance_type": "ecs.n1.medium",
        "io_optimized": "true",
        "image_force_delete": "true",
        "internet_charge_type": "PayByTraffic",
        "ssh_password": "Test1234",
        "user_data_file": "examples/alicloud/chef/user_data.sh"
    }],
    "provisioners": [{
        "type": "file",
        "source": "examples/alicloud/chef/chef.sh",
        "destination": "/root/"
    },{
        "type": "shell",
        "inline": [
            "cd /root/",
            "chmod 755 chef.sh",
            "./chef.sh",
            "chef-server-ctl reconfigure"
        ]
    }]
}
builder/alicloud/examples/chef/chef.sh (Normal file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/sh
#if the related deb pkg is not found, replace the URL with another available repository
HOSTNAME=`ifconfig eth1|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
if [ -z "$HOSTNAME" ] ; then
    HOSTNAME=`ifconfig eth0|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
fi
CHEF_SERVER_URL='http://dubbo.oss-cn-shenzhen.aliyuncs.com/chef-server-core_12.8.0-1_amd64.deb'
CHEF_CONSOLE_URL='http://dubbo.oss-cn-shenzhen.aliyuncs.com/chef-manage_2.4.3-1_amd64.deb'
CHEF_SERVER_ADMIN='admin'
CHEF_SERVER_ADMIN_PASSWORD='vmADMIN123'
ORGANIZATION='aliyun'
ORGANIZATION_FULL_NAME='Aliyun, Inc'
#specify hostname
hostname $HOSTNAME

mkdir ~/.pemfile
#install chef server
wget $CHEF_SERVER_URL
sudo dpkg -i chef-server-core_*.deb
sudo chef-server-ctl reconfigure

#create admin user
sudo chef-server-ctl user-create $CHEF_SERVER_ADMIN $CHEF_SERVER_ADMIN $CHEF_SERVER_ADMIN 641002259@qq.com $CHEF_SERVER_ADMIN_PASSWORD -f ~/.pemfile/admin.pem

#create aliyun organization
sudo chef-server-ctl org-create $ORGANIZATION $ORGANIZATION_FULL_NAME --association_user $CHEF_SERVER_ADMIN -f ~/.pemfile/aliyun-validator.pem

#install chef management console
wget $CHEF_CONSOLE_URL
sudo dpkg -i chef-manage_*.deb
sudo chef-server-ctl reconfigure

type expect >/dev/null 2>&1 || { echo >&2 "Install Expect..."; apt-get -y install expect; }
echo "spawn sudo chef-manage-ctl reconfigure" >> chef-manage-confirm.exp
echo "expect \"*Press any key to continue\"" >> chef-manage-confirm.exp
echo "send \"a\\\n\"" >> chef-manage-confirm.exp
echo "expect \".*chef-manage 2.4.3 license: \\\"Chef-MLSA\\\".*\"" >> chef-manage-confirm.exp
echo "send \"q\"" >> chef-manage-confirm.exp
echo "expect \".*Type 'yes' to accept the software license agreement, or anything else to cancel.\"" >> chef-manage-confirm.exp
echo "send \"yes\\\n\"" >> chef-manage-confirm.exp
echo "interact" >> chef-manage-confirm.exp
expect chef-manage-confirm.exp
rm -f chef-manage-confirm.exp

#clean
rm -rf chef-manage_2.4.3-1_amd64.deb
rm -rf chef-server-core_12.8.0-1_amd64.deb
builder/alicloud/examples/chef/user_data.sh (Normal file, 6 lines)
@@ -0,0 +1,6 @@
HOSTNAME=`ifconfig eth1|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
if [ -z "$HOSTNAME" ] ; then
    HOSTNAME=`ifconfig eth0|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
fi
hostname $HOSTNAME
chef-server-ctl reconfigure
builder/alicloud/examples/jenkins/alicloud.json (Normal file, 32 lines)
@@ -0,0 +1,32 @@
{
    "variables": {
        "access_key": "{{env `ALICLOUD_ACCESS_KEY`}}",
        "secret_key": "{{env `ALICLOUD_SECRET_KEY`}}"
    },
    "builders": [{
        "type": "alicloud-ecs",
        "access_key": "{{user `access_key`}}",
        "secret_key": "{{user `secret_key`}}",
        "region": "cn-beijing",
        "image_name": "packer_jenkins",
        "source_image": "ubuntu_18_04_64_20G_alibase_20190223.vhd",
        "ssh_username": "root",
        "instance_type": "ecs.n1.medium",
        "io_optimized": "true",
        "internet_charge_type": "PayByTraffic",
        "image_force_delete": "true",
        "ssh_password": "Test12345"
    }],
    "provisioners": [{
        "type": "file",
        "source": "examples/alicloud/jenkins/jenkins.sh",
        "destination": "/root/"
    },{
        "type": "shell",
        "inline": [
            "cd /root/",
            "chmod 755 jenkins.sh",
            "./jenkins.sh"
        ]
    }]
}
builder/alicloud/examples/jenkins/jenkins.sh (Normal file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/sh

JENKINS_URL='http://mirrors.jenkins.io/war-stable/2.32.2/jenkins.war'

TOMCAT_VERSION='7.0.77'
TOMCAT_NAME="apache-tomcat-$TOMCAT_VERSION"
TOMCAT_PACKAGE="$TOMCAT_NAME.tar.gz"
TOMCAT_URL="http://mirror.bit.edu.cn/apache/tomcat/tomcat-7/v$TOMCAT_VERSION/bin/$TOMCAT_PACKAGE"
TOMCAT_PATH="/opt/$TOMCAT_NAME"

#install jdk
if grep -Eqi "Ubuntu|Debian|Raspbian" /etc/issue || grep -Eq "Ubuntu|Debian|Raspbian" /etc/*-release; then
    sudo apt-get update -y
    sudo apt-get install -y openjdk-7-jdk
elif grep -Eqi "CentOS|Fedora|Red Hat Enterprise Linux Server" /etc/issue || grep -Eq "CentOS|Fedora|Red Hat Enterprise Linux Server" /etc/*-release; then
    sudo yum update -y
    sudo yum install -y openjdk-7-jdk
else
    echo "Unknown OS type."
fi

#install jenkins server
mkdir ~/work
cd ~/work

#install tomcat
wget $TOMCAT_URL
tar -zxvf $TOMCAT_PACKAGE
mv $TOMCAT_NAME /opt

#install
wget $JENKINS_URL
mv jenkins.war $TOMCAT_PATH/webapps/

#set environment
echo "TOMCAT_PATH=\"$TOMCAT_PATH\"">>/etc/profile
echo "JENKINS_HOME=\"$TOMCAT_PATH/webapps/jenkins\"">>/etc/profile
echo PATH="\"\$PATH:\$TOMCAT_PATH:\$JENKINS_HOME\"">>/etc/profile
. /etc/profile

#start tomcat & jenkins
$TOMCAT_PATH/bin/startup.sh

#set start on boot
sed -i "/#!\/bin\/sh/a$TOMCAT_PATH/bin/startup.sh" /etc/rc.local

#clean
rm -rf ~/work
builder/alicloud/examples/local/centos.json (Normal file, 59 lines)
@@ -0,0 +1,59 @@
{
    "variables": {
        "box_basename": "centos-6.8",
        "build_timestamp": "{{isotime \"20060102150405\"}}",
        "cpus": "1",
        "disk_size": "4096",
        "git_revision": "__unknown_git_revision__",
        "headless": "",
        "http_proxy": "{{env `http_proxy`}}",
        "https_proxy": "{{env `https_proxy`}}",
        "iso_checksum": "md5:0ca12fe5f28c2ceed4f4084b41ff8a0b",
        "iso_name": "CentOS-6.8-x86_64-minimal.iso",
        "ks_path": "centos-6.8/ks.cfg",
        "memory": "512",
        "metadata": "floppy/dummy_metadata.json",
        "mirror": "http://mirrors.aliyun.com/centos",
        "mirror_directory": "6.8/isos/x86_64",
        "name": "centos-6.8",
        "no_proxy": "{{env `no_proxy`}}",
        "template": "centos-6.8-x86_64",
        "version": "2.1.TIMESTAMP"
    },
    "builders": [
        {
            "boot_command": [
                "<tab> text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/{{user `ks_path`}}<enter><wait>"
            ],
            "boot_wait": "10s",
            "disk_size": "{{user `disk_size`}}",
            "headless": "{{ user `headless` }}",
            "http_directory": "http",
            "iso_checksum": "{{user `iso_checksum`}}",
            "iso_checksum_type": "{{user `iso_checksum_type`}}",
            "iso_url": "{{user `mirror`}}/{{user `mirror_directory`}}/{{user `iso_name`}}",
            "output_directory": "packer-{{user `template`}}-qemu",
            "shutdown_command": "echo 'vagrant'|sudo -S /sbin/halt -h -p",
            "ssh_password": "vagrant",
            "ssh_port": 22,
            "ssh_username": "root",
            "ssh_timeout": "10000s",
            "type": "qemu",
            "vm_name": "{{ user `template` }}.raw",
            "net_device": "virtio-net",
            "disk_interface": "virtio",
            "format": "raw"
        }
    ],
    "post-processors": [
        {
            "type": "alicloud-import",
            "oss_bucket_name": "packer",
            "image_name": "packer_import",
            "image_os_type": "linux",
            "image_platform": "CentOS",
            "image_architecture": "x86_64",
            "image_system_size": "40",
            "region": "cn-beijing"
        }
    ]
}
builder/alicloud/examples/local/http/centos-6.8/ks.cfg (Normal file, 69 lines)
@@ -0,0 +1,69 @@
install
cdrom
lang en_US.UTF-8
keyboard us
network --bootproto=dhcp
rootpw vagrant
firewall --disabled
selinux --permissive
timezone UTC
unsupported_hardware
bootloader --location=mbr
text
skipx
zerombr
clearpart --all --initlabel
autopart
auth --enableshadow --passalgo=sha512 --kickstart
firstboot --disabled
reboot
user --name=vagrant --plaintext --password vagrant
key --skip

%packages --nobase --ignoremissing --excludedocs
# vagrant needs this to copy initial files via scp
openssh-clients
sudo
kernel-headers
kernel-devel
gcc
make
perl
wget
nfs-utils
-fprintd-pam
-intltool

# unnecessary firmware
-aic94xx-firmware
-atmel-firmware
-b43-openfwwf
-bfa-firmware
-ipw2100-firmware
-ipw2200-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-libertas-usb8388-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
-xorg-x11-drv-ati-firmware
-zd1211-firmware

%post
# Force to set SELinux to a permissive mode
sed -i -e 's/\(^SELINUX=\).*$/\1permissive/' /etc/selinux/config
# sudo
echo "%vagrant ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/vagrant
builder/alicloud/version/version.go (Normal file, 13 lines)
@@ -0,0 +1,13 @@
package version

import (
    "github.com/hashicorp/packer/helper/version"
    packerVersion "github.com/hashicorp/packer/version"
)

var AlicloudPluginVersion *version.PluginVersion

func init() {
    AlicloudPluginVersion = version.InitializePluginVersion(
        packerVersion.Version, packerVersion.VersionPrerelease)
}
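The plugin version established here is typically threaded into user agents and version reporting elsewhere in the tree. A hypothetical call site, not in this diff; FormattedVersion is assumed to be the accessor that helper/version exposes on PluginVersion:

    package main

    import (
        "fmt"

        alicloudversion "github.com/hashicorp/packer/builder/alicloud/version"
    )

    func main() {
        // Assumed accessor: FormattedVersion joins the core version with any
        // prerelease tag, e.g. "1.6.0-dev".
        fmt.Println(alicloudversion.AlicloudPluginVersion.FormattedVersion())
    }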
builder/amazon/chroot/builder.go (Normal file, 504 lines)
@@ -0,0 +1,504 @@
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config,BlockDevices,BlockDevice

// The chroot package is able to create an Amazon AMI without requiring the
// launch of a new instance for every build. It does this by attaching and
// mounting the root volume of another AMI and chrooting into that directory.
// It then creates an AMI from that attached drive.
package chroot

import (
    "context"
    "errors"
    "runtime"

    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/hashicorp/hcl/v2/hcldec"
    awscommon "github.com/hashicorp/packer/builder/amazon/common"
    "github.com/hashicorp/packer/hcl2template"
    "github.com/hashicorp/packer/helper/multistep"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/packer-plugin-sdk/chroot"
    "github.com/hashicorp/packer/packer-plugin-sdk/common"
    "github.com/hashicorp/packer/packer-plugin-sdk/commonsteps"
    "github.com/hashicorp/packer/packer-plugin-sdk/config"
    "github.com/hashicorp/packer/packer-plugin-sdk/packerbuilderdata"
    "github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
)

// The unique ID for this builder
const BuilderId = "mitchellh.amazon.chroot"

// Config is the configuration that is chained through the steps and settable
// from the template.
type Config struct {
    common.PackerConfig    `mapstructure:",squash"`
    awscommon.AMIConfig    `mapstructure:",squash"`
    awscommon.AccessConfig `mapstructure:",squash"`
    // Add one or more [block device
    // mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
    // to the AMI. If this field is populated, and you are building from an
    // existing source image, the block device mappings in the source image
    // will be overwritten. This means you must have a block device mapping
    // entry for your root volume, `root_volume_size` and `root_device_name`.
    // See the [BlockDevices](#block-devices-configuration) documentation for
    // fields.
    AMIMappings awscommon.BlockDevices `mapstructure:"ami_block_device_mappings" hcl2-schema-generator:"ami_block_device_mappings,direct" required:"false"`
    // This is a list of devices to mount into the chroot environment. This
    // configuration parameter requires some additional documentation which is
    // in the Chroot Mounts section. Please read that section for more
    // information on how to use this.
    ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
    // How to run shell commands. This defaults to `{{.Command}}`. This may be
    // useful to set if you want to set environmental variables or perhaps run
    // it with sudo or so on. This is a configuration template where the
    // .Command variable is replaced with the command to be run. Defaults to
    // `{{.Command}}`.
    CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
    // Paths to files on the running EC2 instance that will be copied into the
    // chroot environment prior to provisioning. Defaults to /etc/resolv.conf
    // so that DNS lookups work. Pass an empty list to skip copying
    // /etc/resolv.conf. You may need to do this if you're building an image
    // that uses systemd.
    CopyFiles []string `mapstructure:"copy_files" required:"false"`
    // The path to the device where the root volume of the source AMI will be
    // attached. This defaults to "" (empty string), which forces Packer to
    // find an open device automatically.
    DevicePath string `mapstructure:"device_path" required:"false"`
    // When we call the mount command (by default mount -o device dir), the
    // string provided in nvme_mount_path will replace device in that command.
    // When this option is not set, device in that command will be something
    // like /dev/sdf1, mirroring the attached device name. This assumption
    // works for most instances but will fail with c5 and m5 instances. In
    // order to use the chroot builder with c5 and m5 instances, you must
    // manually set nvme_device_path and device_path.
    NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
    // Build a new volume instead of starting from an existing AMI root volume
    // snapshot. Default false. If true, source_ami/source_ami_filter are no
    // longer used and the following options become required:
    // ami_virtualization_type, pre_mount_commands and root_volume_size.
    FromScratch bool `mapstructure:"from_scratch" required:"false"`
    // Options to supply the mount command when mounting devices. Each option
    // will be prefixed with -o and supplied to the mount command ran by
    // Packer. Because this command is ran in a shell, user discretion is
    // advised. See this manual page for the mount command for valid file
    // system specific options.
    MountOptions []string `mapstructure:"mount_options" required:"false"`
    // The partition number containing the / partition. By default this is the
    // first partition of the volume, (for example, xvda1) but you can
    // designate the entire block device by setting "mount_partition": "0" in
    // your config, which will mount xvda instead.
    MountPartition string `mapstructure:"mount_partition" required:"false"`
    // The path where the volume will be mounted. This is where the chroot
    // environment will be. This defaults to
    // `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration
    // template where the .Device variable is replaced with the name of the
    // device where the volume is attached.
    MountPath string `mapstructure:"mount_path" required:"false"`
    // As pre_mount_commands, but the commands are executed after mounting the
    // root device and before the extra mount and copy steps. The device and
    // mount path are provided by `{{.Device}}` and `{{.MountPath}}`.
    PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
    // A series of commands to execute after attaching the root volume and
    // before mounting the chroot. This is not required unless using
    // from_scratch. If so, this should include any partitioning and filesystem
    // creation commands. The path to the device is provided by `{{.Device}}`.
    PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
    // The root device name. For example, xvda.
    RootDeviceName string `mapstructure:"root_device_name" required:"false"`
    // The size of the root volume in GB for the chroot environment and the
    // resulting AMI. Default size is the snapshot size of the source_ami
    // unless from_scratch is true, in which case this field must be defined.
    RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
    // The type of EBS volume for the chroot environment and resulting AMI. The
    // default value is the type of the source_ami, unless from_scratch is
    // true, in which case the default value is gp2. You can only specify io1
    // if building based on top of a source_ami which is also io1.
    RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
    // The source AMI whose root volume will be copied and provisioned on the
    // currently running instance. This must be an EBS-backed AMI with a root
    // volume snapshot that you have access to. Note: this is not used when
    // from_scratch is set to true.
    SourceAmi string `mapstructure:"source_ami" required:"true"`
    // Filters used to populate the source_ami field. Example:
    //
    //```json
    //{
    //  "source_ami_filter": {
    //    "filters": {
    //      "virtualization-type": "hvm",
    //      "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
    //      "root-device-type": "ebs"
    //    },
    //    "owners": ["099720109477"],
    //    "most_recent": true
    //  }
    //}
    //```
    //
    //This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
    //This will fail unless *exactly* one AMI is returned. In the above example,
    //`most_recent` will cause this to succeed by selecting the newest image.
    //
    //- `filters` (map of strings) - filters used to select a `source_ami`.
    //  NOTE: This will fail unless *exactly* one AMI is returned. Any filter
    //  described in the docs for
    //  [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
    //  is valid.
    //
    //- `owners` (array of strings) - Filters the images by their owner. You
    //  may specify one or more AWS account IDs, "self" (which will use the
    //  account whose credentials you are using to run Packer), or an AWS owner
    //  alias: for example, "amazon", "aws-marketplace", or "microsoft". This
    //  option is required for security reasons.
    //
    //- `most_recent` (boolean) - Selects the newest created image when true.
    //  This is most useful for selecting a daily distro build.
    //
    //You may set this in place of `source_ami` or in conjunction with it. If you
    //set this in conjunction with `source_ami`, the `source_ami` will be added
    //to the filter. The provided `source_ami` must meet all of the filtering
    //criteria provided in `source_ami_filter`; this pins the AMI returned by the
    //filter, but will cause Packer to fail if the `source_ami` does not exist.
    SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
    // Key/value pair tags to apply to the volumes that are *launched*. This is
    // a [template engine](/docs/templates/engine), see [Build template
    // data](#build-template-data) for more information.
    RootVolumeTags map[string]string `mapstructure:"root_volume_tags" required:"false"`
    // Same as [`root_volume_tags`](#root_volume_tags) but defined as a
    // singular block containing a `key` and a `value` field. In HCL2 mode the
    // [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
    // will allow you to create those programatically.
    RootVolumeTag hcl2template.KeyValues `mapstructure:"root_volume_tag" required:"false"`
    // what architecture to use when registering the final AMI; valid options
    // are "x86_64" or "arm64". Defaults to "x86_64".
    Architecture string `mapstructure:"ami_architecture" required:"false"`

    ctx interpolate.Context
}

func (c *Config) GetContext() interpolate.Context {
    return c.ctx
}

type wrappedCommandTemplate struct {
    Command string
}

type Builder struct {
    config Config
    runner multistep.Runner
}

func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }

func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
    b.config.ctx.Funcs = awscommon.TemplateFuncs
    err := config.Decode(&b.config, &config.DecodeOpts{
        PluginType:         BuilderId,
        Interpolate:        true,
        InterpolateContext: &b.config.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{
                "ami_description",
                "snapshot_tags",
                "snapshot_tag",
                "tags",
                "tag",
                "root_volume_tags",
                "root_volume_tag",
                "command_wrapper",
                "post_mount_commands",
                "pre_mount_commands",
                "mount_path",
            },
        },
    }, raws...)
    if err != nil {
        return nil, nil, err
    }

    if b.config.Architecture == "" {
        b.config.Architecture = "x86_64"
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    // Defaults
    if b.config.ChrootMounts == nil {
        b.config.ChrootMounts = make([][]string, 0)
    }

    if len(b.config.ChrootMounts) == 0 {
        b.config.ChrootMounts = [][]string{
            {"proc", "proc", "/proc"},
            {"sysfs", "sysfs", "/sys"},
            {"bind", "/dev", "/dev"},
            {"devpts", "devpts", "/dev/pts"},
            {"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"},
        }
    }

    // set default copy file if we're not giving our own
    if b.config.CopyFiles == nil {
        if !b.config.FromScratch {
            b.config.CopyFiles = []string{"/etc/resolv.conf"}
        }
    }

    if b.config.CommandWrapper == "" {
        b.config.CommandWrapper = "{{.Command}}"
    }

    if b.config.MountPath == "" {
        b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
    }

    if b.config.MountPartition == "" {
        b.config.MountPartition = "1"
    }

    // Accumulate any errors or warnings
    var errs *packer.MultiError
    var warns []string

    errs = packer.MultiErrorAppend(errs, b.config.RootVolumeTag.CopyOn(&b.config.RootVolumeTags)...)
    errs = packer.MultiErrorAppend(errs, b.config.SourceAmiFilter.Prepare()...)

    errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs,
        b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)

    for _, mounts := range b.config.ChrootMounts {
        if len(mounts) != 3 {
            errs = packer.MultiErrorAppend(
                errs, errors.New("Each chroot_mounts entry should be three elements."))
            break
        }
    }

    if b.config.FromScratch {
        if b.config.SourceAmi != "" || !b.config.SourceAmiFilter.Empty() {
            warns = append(warns, "source_ami and source_ami_filter are unused when from_scratch is true")
        }
        if b.config.RootVolumeSize == 0 {
            errs = packer.MultiErrorAppend(
                errs, errors.New("root_volume_size is required with from_scratch."))
        }
        if len(b.config.PreMountCommands) == 0 {
            errs = packer.MultiErrorAppend(
                errs, errors.New("pre_mount_commands is required with from_scratch."))
        }
        if b.config.AMIVirtType == "" {
            errs = packer.MultiErrorAppend(
                errs, errors.New("ami_virtualization_type is required with from_scratch."))
        }
        if b.config.RootDeviceName == "" {
            errs = packer.MultiErrorAppend(
                errs, errors.New("root_device_name is required with from_scratch."))
        }
        if len(b.config.AMIMappings) == 0 {
            errs = packer.MultiErrorAppend(
                errs, errors.New("ami_block_device_mappings is required with from_scratch."))
        }
    } else {
        if b.config.SourceAmi == "" && b.config.SourceAmiFilter.Empty() {
            errs = packer.MultiErrorAppend(
                errs, errors.New("source_ami or source_ami_filter is required."))
        }
        if len(b.config.AMIMappings) > 0 && b.config.RootDeviceName != "" {
            if b.config.RootVolumeSize == 0 {
                // Although, they can specify the device size in the block
                // device mapping, it's easier to be specific here.
                errs = packer.MultiErrorAppend(
                    errs, errors.New("root_volume_size is required if ami_block_device_mappings is specified"))
            }
            warns = append(warns, "ami_block_device_mappings from source image will be completely overwritten")
        } else if len(b.config.AMIMappings) > 0 {
            errs = packer.MultiErrorAppend(
                errs, errors.New("If ami_block_device_mappings is specified, root_device_name must be specified"))
        } else if b.config.RootDeviceName != "" {
            errs = packer.MultiErrorAppend(
                errs, errors.New("If root_device_name is specified, ami_block_device_mappings must be specified"))
        }
    }
    valid := false
    for _, validArch := range []string{"x86_64", "arm64"} {
        if validArch == b.config.Architecture {
            valid = true
            break
        }
    }
    if !valid {
        errs = packer.MultiErrorAppend(errs, errors.New(`The only valid ami_architecture values are "x86_64" and "arm64"`))
    }

    if errs != nil && len(errs.Errors) > 0 {
        return nil, warns, errs
    }

    packer.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
    generatedData := awscommon.GetGeneratedDataList()
    generatedData = append(generatedData, "Device", "MountPath")

    return generatedData, warns, nil
}

func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
    if runtime.GOOS != "linux" {
        return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
    }

    session, err := b.config.Session()
    if err != nil {
        return nil, err
    }
    ec2conn := ec2.New(session)

    wrappedCommand := func(command string) (string, error) {
        ictx := b.config.ctx
        ictx.Data = &wrappedCommandTemplate{Command: command}
        return interpolate.Render(b.config.CommandWrapper, &ictx)
    }

    // Setup the state bag and initial state for the steps
    state := new(multistep.BasicStateBag)
    state.Put("config", &b.config)
    state.Put("access_config", &b.config.AccessConfig)
    state.Put("ami_config", &b.config.AMIConfig)
    state.Put("ec2", ec2conn)
    state.Put("awsSession", session)
    state.Put("hook", hook)
    state.Put("ui", ui)
    state.Put("wrappedCommand", common.CommandWrapper(wrappedCommand))
    generatedData := &packerbuilderdata.GeneratedData{State: state}

    // Build the steps
    steps := []multistep.Step{
        &awscommon.StepPreValidate{
            DestAmiName:     b.config.AMIName,
            ForceDeregister: b.config.AMIForceDeregister,
        },
        &StepInstanceInfo{},
    }

    if !b.config.FromScratch {
        steps = append(steps,
            &awscommon.StepSourceAMIInfo{
                SourceAmi:                b.config.SourceAmi,
                EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
                EnableAMIENASupport:      b.config.AMIENASupport,
                AmiFilters:               b.config.SourceAmiFilter,
                AMIVirtType:              b.config.AMIVirtType,
            },
            &StepCheckRootDevice{},
        )
    }

    steps = append(steps,
        &StepFlock{},
        &StepPrepareDevice{
            GeneratedData: generatedData,
        },
        &StepCreateVolume{
            PollingConfig:  b.config.PollingConfig,
            RootVolumeType: b.config.RootVolumeType,
            RootVolumeSize: b.config.RootVolumeSize,
            RootVolumeTags: b.config.RootVolumeTags,
            Ctx:            b.config.ctx,
        },
        &StepAttachVolume{
            PollingConfig: b.config.PollingConfig,
        },
        &StepEarlyUnflock{},
        &chroot.StepPreMountCommands{
            Commands: b.config.PreMountCommands,
        },
        &StepMountDevice{
            MountOptions:   b.config.MountOptions,
            MountPartition: b.config.MountPartition,
            GeneratedData:  generatedData,
        },
        &chroot.StepPostMountCommands{
            Commands: b.config.PostMountCommands,
        },
        &chroot.StepMountExtra{
            ChrootMounts: b.config.ChrootMounts,
        },
        &chroot.StepCopyFiles{
            Files: b.config.CopyFiles,
        },
        &awscommon.StepSetGeneratedData{
            GeneratedData: generatedData,
        },
        &chroot.StepChrootProvision{},
        &chroot.StepEarlyCleanup{},
        &StepSnapshot{
            PollingConfig: b.config.PollingConfig,
        },
        &awscommon.StepDeregisterAMI{
            AccessConfig:        &b.config.AccessConfig,
            ForceDeregister:     b.config.AMIForceDeregister,
            ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
            AMIName:             b.config.AMIName,
            Regions:             b.config.AMIRegions,
        },
        &StepRegisterAMI{
            RootVolumeSize:           b.config.RootVolumeSize,
            EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
            EnableAMIENASupport:      b.config.AMIENASupport,
            AMISkipBuildRegion:       b.config.AMISkipBuildRegion,
            PollingConfig:            b.config.PollingConfig,
        },
        &awscommon.StepAMIRegionCopy{
            AccessConfig:      &b.config.AccessConfig,
            Regions:           b.config.AMIRegions,
            AMIKmsKeyId:       b.config.AMIKmsKeyId,
            RegionKeyIds:      b.config.AMIRegionKMSKeyIDs,
            EncryptBootVolume: b.config.AMIEncryptBootVolume,
            Name:              b.config.AMIName,
            OriginalRegion:    *ec2conn.Config.Region,
        },
        &awscommon.StepModifyAMIAttributes{
            Description:    b.config.AMIDescription,
            Users:          b.config.AMIUsers,
            Groups:         b.config.AMIGroups,
            ProductCodes:   b.config.AMIProductCodes,
            SnapshotUsers:  b.config.SnapshotUsers,
            SnapshotGroups: b.config.SnapshotGroups,
            Ctx:            b.config.ctx,
            GeneratedData:  generatedData,
        },
        &awscommon.StepCreateTags{
            Tags:         b.config.AMITags,
            SnapshotTags: b.config.SnapshotTags,
            Ctx:          b.config.ctx,
        },
    )

    // Run!
    b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
    b.runner.Run(ctx, state)

    // If there was an error, return that
    if rawErr, ok := state.GetOk("error"); ok {
        return nil, rawErr.(error)
    }

    // If there are no AMIs, then just return
    if _, ok := state.GetOk("amis"); !ok {
        return nil, nil
    }

    // Build the artifact and return it
    artifact := &awscommon.Artifact{
        Amis:           state.Get("amis").(map[string]string),
        BuilderIdValue: BuilderId,
        Session:        session,
        StateData:      map[string]interface{}{"generated_data": state.Get("generated_data")},
    }

    return artifact, nil
}
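To make the Prepare/Run contract above concrete, here is a sketch (not part of this changeset) of driving the builder the way Packer core does. The template values are hypothetical, and ui and hook are supplied by the core in reality:

    // Sketch only, from inside the chroot package. Prepare returns the
    // generated-data keys, any warnings, and a validation error; Run executes
    // the step list and, on success, yields the AMI artifact.
    func runChrootSketch(ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
        b := &Builder{}
        if _, _, err := b.Prepare(map[string]interface{}{
            "ami_name":   "packer-chroot-example", // hypothetical
            "source_ami": "ami-0123456789abcdef0", // hypothetical
            "region":     "us-east-1",
        }); err != nil {
            return nil, err
        }
        // Run only works on Linux; see the runtime.GOOS guard above.
        return b.Run(context.Background(), ui, hook)
    }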
162
builder/amazon/chroot/builder.hcl2spec.go
Normal file
162
builder/amazon/chroot/builder.hcl2spec.go
Normal file
@ -0,0 +1,162 @@
// Code generated by "mapstructure-to-hcl2 -type Config,BlockDevices,BlockDevice"; DO NOT EDIT.
package chroot

import (
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/hcl2template"
	"github.com/zclconf/go-cty/cty"
)

// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatConfig struct {
	PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
	PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
	PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
	PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
	PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
	PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
	PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
	PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
	AMIName *string `mapstructure:"ami_name" required:"true" cty:"ami_name" hcl:"ami_name"`
	AMIDescription *string `mapstructure:"ami_description" required:"false" cty:"ami_description" hcl:"ami_description"`
	AMIVirtType *string `mapstructure:"ami_virtualization_type" required:"false" cty:"ami_virtualization_type" hcl:"ami_virtualization_type"`
	AMIUsers []string `mapstructure:"ami_users" required:"false" cty:"ami_users" hcl:"ami_users"`
	AMIGroups []string `mapstructure:"ami_groups" required:"false" cty:"ami_groups" hcl:"ami_groups"`
	AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false" cty:"ami_product_codes" hcl:"ami_product_codes"`
	AMIRegions []string `mapstructure:"ami_regions" required:"false" cty:"ami_regions" hcl:"ami_regions"`
	AMISkipRegionValidation *bool `mapstructure:"skip_region_validation" required:"false" cty:"skip_region_validation" hcl:"skip_region_validation"`
	AMITags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
	AMITag []hcl2template.FlatKeyValue `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
	AMIENASupport *bool `mapstructure:"ena_support" required:"false" cty:"ena_support" hcl:"ena_support"`
	AMISriovNetSupport *bool `mapstructure:"sriov_support" required:"false" cty:"sriov_support" hcl:"sriov_support"`
	AMIForceDeregister *bool `mapstructure:"force_deregister" required:"false" cty:"force_deregister" hcl:"force_deregister"`
	AMIForceDeleteSnapshot *bool `mapstructure:"force_delete_snapshot" required:"false" cty:"force_delete_snapshot" hcl:"force_delete_snapshot"`
	AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false" cty:"encrypt_boot" hcl:"encrypt_boot"`
	AMIKmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
	AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false" cty:"region_kms_key_ids" hcl:"region_kms_key_ids"`
	AMISkipBuildRegion *bool `mapstructure:"skip_save_build_region" cty:"skip_save_build_region" hcl:"skip_save_build_region"`
	SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false" cty:"snapshot_tags" hcl:"snapshot_tags"`
	SnapshotTag []hcl2template.FlatKeyValue `mapstructure:"snapshot_tag" required:"false" cty:"snapshot_tag" hcl:"snapshot_tag"`
	SnapshotUsers []string `mapstructure:"snapshot_users" required:"false" cty:"snapshot_users" hcl:"snapshot_users"`
	SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false" cty:"snapshot_groups" hcl:"snapshot_groups"`
	AccessKey *string `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"`
	AssumeRole *common.FlatAssumeRoleConfig `mapstructure:"assume_role" required:"false" cty:"assume_role" hcl:"assume_role"`
	CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2" hcl:"custom_endpoint_ec2"`
	CredsFilename *string `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"`
	DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages" hcl:"decode_authorization_messages"`
	InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify" hcl:"insecure_skip_tls_verify"`
	MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries" hcl:"max_retries"`
	MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code" hcl:"mfa_code"`
	ProfileName *string `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"`
	RawRegion *string `mapstructure:"region" required:"true" cty:"region" hcl:"region"`
	SecretKey *string `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"`
	SkipMetadataApiCheck *bool `mapstructure:"skip_metadata_api_check" cty:"skip_metadata_api_check" hcl:"skip_metadata_api_check"`
	SkipCredsValidation *bool `mapstructure:"skip_credential_validation" cty:"skip_credential_validation" hcl:"skip_credential_validation"`
	Token *string `mapstructure:"token" required:"false" cty:"token" hcl:"token"`
	VaultAWSEngine *common.FlatVaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false" cty:"vault_aws_engine" hcl:"vault_aws_engine"`
	PollingConfig *common.FlatAWSPollingConfig `mapstructure:"aws_polling" required:"false" cty:"aws_polling" hcl:"aws_polling"`
	AMIMappings []common.FlatBlockDevice `mapstructure:"ami_block_device_mappings" hcl2-schema-generator:"ami_block_device_mappings,direct" required:"false" cty:"ami_block_device_mappings" hcl:"ami_block_device_mappings"`
	ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false" cty:"chroot_mounts" hcl:"chroot_mounts"`
	CommandWrapper *string `mapstructure:"command_wrapper" required:"false" cty:"command_wrapper" hcl:"command_wrapper"`
	CopyFiles []string `mapstructure:"copy_files" required:"false" cty:"copy_files" hcl:"copy_files"`
	DevicePath *string `mapstructure:"device_path" required:"false" cty:"device_path" hcl:"device_path"`
	NVMEDevicePath *string `mapstructure:"nvme_device_path" required:"false" cty:"nvme_device_path" hcl:"nvme_device_path"`
	FromScratch *bool `mapstructure:"from_scratch" required:"false" cty:"from_scratch" hcl:"from_scratch"`
	MountOptions []string `mapstructure:"mount_options" required:"false" cty:"mount_options" hcl:"mount_options"`
	MountPartition *string `mapstructure:"mount_partition" required:"false" cty:"mount_partition" hcl:"mount_partition"`
	MountPath *string `mapstructure:"mount_path" required:"false" cty:"mount_path" hcl:"mount_path"`
	PostMountCommands []string `mapstructure:"post_mount_commands" required:"false" cty:"post_mount_commands" hcl:"post_mount_commands"`
	PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false" cty:"pre_mount_commands" hcl:"pre_mount_commands"`
	RootDeviceName *string `mapstructure:"root_device_name" required:"false" cty:"root_device_name" hcl:"root_device_name"`
	RootVolumeSize *int64 `mapstructure:"root_volume_size" required:"false" cty:"root_volume_size" hcl:"root_volume_size"`
	RootVolumeType *string `mapstructure:"root_volume_type" required:"false" cty:"root_volume_type" hcl:"root_volume_type"`
	SourceAmi *string `mapstructure:"source_ami" required:"true" cty:"source_ami" hcl:"source_ami"`
	SourceAmiFilter *common.FlatAmiFilterOptions `mapstructure:"source_ami_filter" required:"false" cty:"source_ami_filter" hcl:"source_ami_filter"`
	RootVolumeTags map[string]string `mapstructure:"root_volume_tags" required:"false" cty:"root_volume_tags" hcl:"root_volume_tags"`
	RootVolumeTag []hcl2template.FlatKeyValue `mapstructure:"root_volume_tag" required:"false" cty:"root_volume_tag" hcl:"root_volume_tag"`
	Architecture *string `mapstructure:"ami_architecture" required:"false" cty:"ami_architecture" hcl:"ami_architecture"`
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
		"packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
		"packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
		"packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
		"packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
		"packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
		"packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
		"packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
		"ami_name": &hcldec.AttrSpec{Name: "ami_name", Type: cty.String, Required: false},
		"ami_description": &hcldec.AttrSpec{Name: "ami_description", Type: cty.String, Required: false},
		"ami_virtualization_type": &hcldec.AttrSpec{Name: "ami_virtualization_type", Type: cty.String, Required: false},
		"ami_users": &hcldec.AttrSpec{Name: "ami_users", Type: cty.List(cty.String), Required: false},
		"ami_groups": &hcldec.AttrSpec{Name: "ami_groups", Type: cty.List(cty.String), Required: false},
		"ami_product_codes": &hcldec.AttrSpec{Name: "ami_product_codes", Type: cty.List(cty.String), Required: false},
		"ami_regions": &hcldec.AttrSpec{Name: "ami_regions", Type: cty.List(cty.String), Required: false},
		"skip_region_validation": &hcldec.AttrSpec{Name: "skip_region_validation", Type: cty.Bool, Required: false},
		"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
		"tag": &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*hcl2template.FlatKeyValue)(nil).HCL2Spec())},
		"ena_support": &hcldec.AttrSpec{Name: "ena_support", Type: cty.Bool, Required: false},
		"sriov_support": &hcldec.AttrSpec{Name: "sriov_support", Type: cty.Bool, Required: false},
		"force_deregister": &hcldec.AttrSpec{Name: "force_deregister", Type: cty.Bool, Required: false},
		"force_delete_snapshot": &hcldec.AttrSpec{Name: "force_delete_snapshot", Type: cty.Bool, Required: false},
		"encrypt_boot": &hcldec.AttrSpec{Name: "encrypt_boot", Type: cty.Bool, Required: false},
		"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
		"region_kms_key_ids": &hcldec.AttrSpec{Name: "region_kms_key_ids", Type: cty.Map(cty.String), Required: false},
		"skip_save_build_region": &hcldec.AttrSpec{Name: "skip_save_build_region", Type: cty.Bool, Required: false},
		"snapshot_tags": &hcldec.AttrSpec{Name: "snapshot_tags", Type: cty.Map(cty.String), Required: false},
		"snapshot_tag": &hcldec.BlockListSpec{TypeName: "snapshot_tag", Nested: hcldec.ObjectSpec((*hcl2template.FlatKeyValue)(nil).HCL2Spec())},
		"snapshot_users": &hcldec.AttrSpec{Name: "snapshot_users", Type: cty.List(cty.String), Required: false},
		"snapshot_groups": &hcldec.AttrSpec{Name: "snapshot_groups", Type: cty.List(cty.String), Required: false},
		"access_key": &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false},
		"assume_role": &hcldec.BlockSpec{TypeName: "assume_role", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())},
		"custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false},
		"shared_credentials_file": &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false},
		"decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false},
		"insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false},
		"max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
		"mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false},
		"profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false},
		"region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
		"secret_key": &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false},
		"skip_metadata_api_check": &hcldec.AttrSpec{Name: "skip_metadata_api_check", Type: cty.Bool, Required: false},
		"skip_credential_validation": &hcldec.AttrSpec{Name: "skip_credential_validation", Type: cty.Bool, Required: false},
		"token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
		"vault_aws_engine": &hcldec.BlockSpec{TypeName: "vault_aws_engine", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())},
		"aws_polling": &hcldec.BlockSpec{TypeName: "aws_polling", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())},
		"ami_block_device_mappings": &hcldec.BlockListSpec{TypeName: "ami_block_device_mappings", Nested: hcldec.ObjectSpec((*common.FlatBlockDevice)(nil).HCL2Spec())},
		"chroot_mounts": &hcldec.AttrSpec{Name: "chroot_mounts", Type: cty.List(cty.List(cty.String)), Required: false},
		"command_wrapper": &hcldec.AttrSpec{Name: "command_wrapper", Type: cty.String, Required: false},
		"copy_files": &hcldec.AttrSpec{Name: "copy_files", Type: cty.List(cty.String), Required: false},
		"device_path": &hcldec.AttrSpec{Name: "device_path", Type: cty.String, Required: false},
		"nvme_device_path": &hcldec.AttrSpec{Name: "nvme_device_path", Type: cty.String, Required: false},
		"from_scratch": &hcldec.AttrSpec{Name: "from_scratch", Type: cty.Bool, Required: false},
		"mount_options": &hcldec.AttrSpec{Name: "mount_options", Type: cty.List(cty.String), Required: false},
		"mount_partition": &hcldec.AttrSpec{Name: "mount_partition", Type: cty.String, Required: false},
		"mount_path": &hcldec.AttrSpec{Name: "mount_path", Type: cty.String, Required: false},
		"post_mount_commands": &hcldec.AttrSpec{Name: "post_mount_commands", Type: cty.List(cty.String), Required: false},
		"pre_mount_commands": &hcldec.AttrSpec{Name: "pre_mount_commands", Type: cty.List(cty.String), Required: false},
		"root_device_name": &hcldec.AttrSpec{Name: "root_device_name", Type: cty.String, Required: false},
		"root_volume_size": &hcldec.AttrSpec{Name: "root_volume_size", Type: cty.Number, Required: false},
		"root_volume_type": &hcldec.AttrSpec{Name: "root_volume_type", Type: cty.String, Required: false},
		"source_ami": &hcldec.AttrSpec{Name: "source_ami", Type: cty.String, Required: false},
		"source_ami_filter": &hcldec.BlockSpec{TypeName: "source_ami_filter", Nested: hcldec.ObjectSpec((*common.FlatAmiFilterOptions)(nil).HCL2Spec())},
		"root_volume_tags": &hcldec.AttrSpec{Name: "root_volume_tags", Type: cty.Map(cty.String), Required: false},
		"root_volume_tag": &hcldec.BlockListSpec{TypeName: "root_volume_tag", Nested: hcldec.ObjectSpec((*hcl2template.FlatKeyValue)(nil).HCL2Spec())},
		"ami_architecture": &hcldec.AttrSpec{Name: "ami_architecture", Type: cty.String, Required: false},
	}
	return s
}
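FlatConfig and HCL2Spec are what let HCL2 templates decode into this builder's configuration: HCL2Spec returns map[string]hcldec.Spec, and hcldec.ObjectSpec is that same map type, so the whole body decodes into one cty object value. A hedged sketch of how such a spec is typically consumed (decodeExample and the snippet contents are illustrative, not part of the generated file):

package chroot

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
)

// decodeExample is a hypothetical helper showing the generated spec in use.
func decodeExample(src []byte) error {
	f, diags := hclparse.NewParser().ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		return diags
	}
	// Convert the generated spec map into an ObjectSpec and decode the body.
	spec := hcldec.ObjectSpec((*FlatConfig)(nil).HCL2Spec())
	val, diags := hcldec.Decode(f.Body, spec, nil)
	if diags.HasErrors() {
		return diags
	}
	fmt.Println(val.GetAttr("ami_name")) // e.g. cty.StringVal("example-ami")
	return nil
}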
251 builder/amazon/chroot/builder_test.go Normal file
@ -0,0 +1,251 @@
package chroot

import (
	"testing"

	"github.com/hashicorp/packer/packer"
)

func testConfig() map[string]interface{} {
	return map[string]interface{}{
		"ami_name": "foo",
		"source_ami": "foo",
		"region": "us-east-1",
		// region validation logic is checked in ami_config_test
		"skip_region_validation": true,
	}
}

func TestBuilder_ImplementsBuilder(t *testing.T) {
	var raw interface{}
	raw = &Builder{}
	if _, ok := raw.(packer.Builder); !ok {
		t.Fatalf("Builder should be a builder")
	}
}

func TestBuilderPrepare_AMIName(t *testing.T) {
	var b Builder
	config := testConfig()

	// Test good
	config["ami_name"] = "foo"
	config["skip_region_validation"] = true
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	// Test bad
	config["ami_name"] = "foo {{"
	b = Builder{}
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}

	// Test bad
	delete(config, "ami_name")
	b = Builder{}
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}

func TestBuilderPrepare_ChrootMounts(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	config["chroot_mounts"] = nil
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Errorf("err: %s", err)
	}
}

func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	config["chroot_mounts"] = [][]string{
		{"bad"},
	}
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}
func TestBuilderPrepare_SourceAmi(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	config["source_ami"] = ""
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}

	config["source_ami"] = "foo"
	_, warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Errorf("err: %s", err)
	}
}

func TestBuilderPrepare_CommandWrapper(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	config["command_wrapper"] = "echo hi; {{.Command}}"
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Errorf("err: %s", err)
	}
}

func TestBuilderPrepare_CopyFiles(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Errorf("err: %s", err)
	}

	if len(b.config.CopyFiles) != 1 || b.config.CopyFiles[0] != "/etc/resolv.conf" {
		t.Errorf("Was expecting default value for copy_files.")
	}
}

func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) {
	b := &Builder{}
	config := testConfig()

	config["copy_files"] = []string{}
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Errorf("err: %s", err)
	}

	if len(b.config.CopyFiles) > 0 {
		t.Errorf("Was expecting no default value for copy_files. Found %v",
			b.config.CopyFiles)
	}
}

func TestBuilderPrepare_RootDeviceNameAndAMIMappings(t *testing.T) {
	var b Builder
	config := testConfig()

	config["root_device_name"] = "/dev/sda"
	config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
	config["root_volume_size"] = 15
	_, warnings, err := b.Prepare(config)
	if len(warnings) == 0 {
		t.Fatal("Missing warning, stating block device mappings will be overwritten")
	} else if len(warnings) > 1 {
		t.Fatalf("excessive warnings: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
}

func TestBuilderPrepare_AMIMappingsNoRootDeviceName(t *testing.T) {
	var b Builder
	config := testConfig()

	config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatalf("should have error")
	}
}

func TestBuilderPrepare_RootDeviceNameNoAMIMappings(t *testing.T) {
	var b Builder
	config := testConfig()

	config["root_device_name"] = "/dev/sda"
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatalf("should have error")
	}
}

func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
	var b Builder
	config := testConfig()

	generatedData, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
	if len(generatedData) == 0 {
		t.Fatalf("Generated data should not be empty")
	}
	if generatedData[0] != "SourceAMIName" {
		t.Fatalf("Generated data should contain SourceAMIName")
	}
	if generatedData[1] != "BuildRegion" {
		t.Fatalf("Generated data should contain BuildRegion")
	}
	if generatedData[2] != "SourceAMI" {
		t.Fatalf("Generated data should contain SourceAMI")
	}
	if generatedData[3] != "SourceAMICreationDate" {
		t.Fatalf("Generated data should contain SourceAMICreationDate")
	}
	if generatedData[4] != "SourceAMIOwner" {
		t.Fatalf("Generated data should contain SourceAMIOwner")
	}
	if generatedData[5] != "SourceAMIOwnerName" {
		t.Fatalf("Generated data should contain SourceAMIOwnerName")
	}
	if generatedData[6] != "Device" {
		t.Fatalf("Generated data should contain Device")
	}
	if generatedData[7] != "MountPath" {
		t.Fatalf("Generated data should contain MountPath")
	}
}
1 builder/amazon/chroot/copy_files.go Normal file
@ -0,0 +1 @@
package chroot
51 builder/amazon/chroot/copy_files_test.go Normal file
@ -0,0 +1,51 @@
package chroot

import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"testing"

	"github.com/hashicorp/packer/packer-plugin-sdk/common"
)

func TestCopyFile(t *testing.T) {
	if runtime.GOOS == "windows" {
		return
	}

	first, err := ioutil.TempFile("", "copy_files_test")
	if err != nil {
		t.Fatalf("couldn't create temp file.")
	}
	defer os.Remove(first.Name())
	newName := first.Name() + "-new"

	payload := "copy_files_test.go payload"
	if _, err = first.WriteString(payload); err != nil {
		t.Fatalf("Couldn't write payload to first file.")
	}
	first.Sync()

	cmd := common.ShellCommand(fmt.Sprintf("cp %s %s", first.Name(), newName))
	if err := cmd.Run(); err != nil {
		t.Fatalf("Couldn't copy file")
	}
	defer os.Remove(newName)

	second, err := os.Open(newName)
	if err != nil {
		t.Fatalf("Couldn't open copied file.")
	}
	defer second.Close()

	var copiedPayload = make([]byte, len(payload))
	if _, err := second.Read(copiedPayload); err != nil {
		t.Fatalf("Couldn't read the copied file.")
	}

	if string(copiedPayload) != payload {
		t.Fatalf("payload not copied.")
	}
}
70 builder/amazon/chroot/device.go Normal file
@ -0,0 +1,70 @@
package chroot

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// AvailableDevice finds an available device and returns it. Note that
// you should externally hold a flock or something in order to guarantee
// that this device is available across processes.
func AvailableDevice() (string, error) {
	prefix, err := devicePrefix()
	if err != nil {
		return "", err
	}

	letters := "fghijklmnop"
	for _, letter := range letters {
		device := fmt.Sprintf("/dev/%s%c", prefix, letter)

		// If the block device itself, i.e. /dev/sdf, exists, then we
		// can't use any of the numbers either.
		if _, err := os.Stat(device); err == nil {
			continue
		}

		// To be able to build both Paravirtual and HVM images, the unnumbered
		// device and the first numbered one must be available.
		// E.g. /dev/xvdf and /dev/xvdf1
		numbered_device := fmt.Sprintf("%s%d", device, 1)
		if _, err := os.Stat(numbered_device); err != nil {
			return device, nil
		}
	}

	return "", errors.New("available device could not be found")
}

// devicePrefix returns the prefix ("sd" or "xvd" or so on) of the devices
// on the system.
func devicePrefix() (string, error) {
	available := []string{"sd", "xvd"}

	f, err := os.Open("/sys/block")
	if err != nil {
		return "", err
	}
	defer f.Close()

	dirs, err := f.Readdirnames(-1)
	if len(dirs) > 0 {
		for _, dir := range dirs {
			dirBase := filepath.Base(dir)
			for _, prefix := range available {
				if strings.HasPrefix(dirBase, prefix) {
					return prefix, nil
				}
			}
		}
	}

	if err != nil {
		return "", err
	}

	return "", errors.New("device prefix could not be detected")
}
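AvailableDevice only checks what exists under /dev at one instant, which is why the builder pairs it with the file lock acquired in StepFlock: nothing reserves the returned name. A minimal sketch of the intended call pattern from inside this package (pickDevice is hypothetical):

package chroot

import "log"

// pickDevice is a hypothetical caller; the flock must already be held,
// since AvailableDevice cannot reserve the name it returns.
func pickDevice() (string, error) {
	device, err := AvailableDevice() // e.g. "/dev/xvdf"
	if err != nil {
		return "", err
	}
	log.Printf("attaching volume at %s", device)
	return device, nil
}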
10 builder/amazon/chroot/device_test.go Normal file
@ -0,0 +1,10 @@
package chroot

import "testing"

func TestDevicePrefixMatch(t *testing.T) {
	/*
		if devicePrefixMatch("nvme0n1") != "" {
		}
	*/
}
16 builder/amazon/chroot/lockfile.go Normal file
@ -0,0 +1,16 @@
// +build windows

package chroot

import (
	"errors"
	"os"
)

func lockFile(*os.File) error {
	return errors.New("not supported on Windows")
}

func unlockFile(f *os.File) error {
	return nil
}
27 builder/amazon/chroot/lockfile_unix.go Normal file
@ -0,0 +1,27 @@
// +build !windows

package chroot

import (
	"os"

	"golang.org/x/sys/unix"
)

// See: http://linux.die.net/include/sys/file.h
const LOCK_EX = 2
const LOCK_NB = 4
const LOCK_UN = 8

func lockFile(f *os.File) error {
	err := unix.Flock(int(f.Fd()), LOCK_EX)
	if err != nil {
		return err
	}

	return nil
}

func unlockFile(f *os.File) error {
	return unix.Flock(int(f.Fd()), LOCK_UN)
}
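Both platform variants expose the same call shape, so callers can bracket a critical section without build tags of their own. A minimal sketch, assuming a lock path the process may create (withFileLock is hypothetical, not part of the package):

package chroot

import "os"

// withFileLock is a hypothetical wrapper: it holds the flock for the
// duration of fn and releases it afterwards. On Windows, lockFile always
// errors, so the critical section is simply never entered there.
func withFileLock(path string, fn func() error) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := lockFile(f); err != nil {
		return err
	}
	defer unlockFile(f)

	return fn()
}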
97 builder/amazon/chroot/step_attach_volume.go Normal file
@ -0,0 +1,97 @@
package chroot

import (
	"context"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// StepAttachVolume attaches the previously created volume to an
// available device location.
//
// Produces:
//   device string - The location where the volume was attached.
//   attach_cleanup CleanupFunc
type StepAttachVolume struct {
	PollingConfig *awscommon.AWSPollingConfig
	attached bool
	volumeId string
}

func (s *StepAttachVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ec2conn := state.Get("ec2").(*ec2.EC2)
	device := state.Get("device").(string)
	instance := state.Get("instance").(*ec2.Instance)
	ui := state.Get("ui").(packer.Ui)
	volumeId := state.Get("volume_id").(string)

	// For the API call, it expects "sd" prefixed devices.
	attachVolume := strings.Replace(device, "/xvd", "/sd", 1)

	ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume))
	_, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{
		InstanceId: instance.InstanceId,
		VolumeId: &volumeId,
		Device: &attachVolume,
	})
	if err != nil {
		err := fmt.Errorf("Error attaching volume: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Mark that we attached it so we can detach it later
	s.attached = true
	s.volumeId = volumeId

	// Wait for the volume to become attached
	err = s.PollingConfig.WaitUntilVolumeAttached(ctx, ec2conn, s.volumeId)
	if err != nil {
		err := fmt.Errorf("Error waiting for volume: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	state.Put("attach_cleanup", s)
	return multistep.ActionContinue
}

func (s *StepAttachVolume) Cleanup(state multistep.StateBag) {
	ui := state.Get("ui").(packer.Ui)
	if err := s.CleanupFunc(state); err != nil {
		ui.Error(err.Error())
	}
}

func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
	if !s.attached {
		return nil
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Detaching EBS volume...")
	_, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeId: &s.volumeId})
	if err != nil {
		return fmt.Errorf("Error detaching EBS volume: %s", err)
	}

	s.attached = false

	// Wait for the volume to detach
	err = s.PollingConfig.WaitUntilVolumeDetached(aws.BackgroundContext(), ec2conn, s.volumeId)
	if err != nil {
		return fmt.Errorf("Error waiting for volume: %s", err)
	}

	return nil
}
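The one subtle line in Run is the device-name remap: the EC2 AttachVolume API expects sd-prefixed names even when the kernel exposes xvd names. A tiny standalone illustration of that rewrite:

package main

import (
	"fmt"
	"strings"
)

func main() {
	device := "/dev/xvdf"
	// Same remap as StepAttachVolume: only the first "/xvd" is rewritten.
	attachVolume := strings.Replace(device, "/xvd", "/sd", 1)
	fmt.Println(attachVolume) // /dev/sdf
}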
15 builder/amazon/chroot/step_attach_volume_test.go Normal file
@ -0,0 +1,15 @@
package chroot

import (
	"testing"

	"github.com/hashicorp/packer/packer-plugin-sdk/chroot"
)

func TestAttachVolumeCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
	var raw interface{}
	raw = new(StepAttachVolume)
	if _, ok := raw.(chroot.Cleanup); !ok {
		t.Fatalf("cleanup func should be a CleanupFunc")
	}
}
32 builder/amazon/chroot/step_check_root_device.go Normal file
@ -0,0 +1,32 @@
package chroot

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
type StepCheckRootDevice struct{}

func (s *StepCheckRootDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	image := state.Get("source_image").(*ec2.Image)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Checking the root device on source AMI...")

	// It must be EBS-backed otherwise the build won't work
	if *image.RootDeviceType != "ebs" {
		err := fmt.Errorf("The root device of the source AMI must be EBS-backed.")
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *StepCheckRootDevice) Cleanup(multistep.StateBag) {}
169 builder/amazon/chroot/step_create_volume.go Normal file
@ -0,0 +1,169 @@
package chroot

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
)

// StepCreateVolume creates a new volume from the snapshot of the root
// device of the AMI.
//
// Produces:
//   volume_id string - The ID of the created volume
type StepCreateVolume struct {
	PollingConfig *awscommon.AWSPollingConfig
	volumeId string
	RootVolumeSize int64
	RootVolumeType string
	RootVolumeTags map[string]string
	Ctx interpolate.Context
}

func (s *StepCreateVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ec2conn := state.Get("ec2").(*ec2.EC2)
	instance := state.Get("instance").(*ec2.Instance)
	ui := state.Get("ui").(packer.Ui)

	volTags, err := awscommon.TagMap(s.RootVolumeTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
	if err != nil {
		err := fmt.Errorf("Error tagging volumes: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Collect tags for tagging on resource creation
	var tagSpecs []*ec2.TagSpecification

	if len(volTags) > 0 {
		runVolTags := &ec2.TagSpecification{
			ResourceType: aws.String("volume"),
			Tags: volTags,
		}

		tagSpecs = append(tagSpecs, runVolTags)
	}

	var createVolume *ec2.CreateVolumeInput
	if config.FromScratch {
		rootVolumeType := ec2.VolumeTypeGp2
		if s.RootVolumeType == "io1" {
			err := errors.New("Cannot use io1 volume when building from scratch")
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		} else if s.RootVolumeType != "" {
			rootVolumeType = s.RootVolumeType
		}
		createVolume = &ec2.CreateVolumeInput{
			AvailabilityZone: instance.Placement.AvailabilityZone,
			Size: aws.Int64(s.RootVolumeSize),
			VolumeType: aws.String(rootVolumeType),
		}

	} else {
		// Determine the root device snapshot
		image := state.Get("source_image").(*ec2.Image)
		log.Printf("Searching for root device of the image (%s)", *image.RootDeviceName)
		var rootDevice *ec2.BlockDeviceMapping
		for _, device := range image.BlockDeviceMappings {
			if *device.DeviceName == *image.RootDeviceName {
				rootDevice = device
				break
			}
		}

		ui.Say("Creating the root volume...")
		createVolume, err = s.buildCreateVolumeInput(*instance.Placement.AvailabilityZone, rootDevice)
		if err != nil {
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	if len(tagSpecs) > 0 {
		createVolume.SetTagSpecifications(tagSpecs)
		volTags.Report(ui)
	}
	log.Printf("Create args: %+v", createVolume)

	createVolumeResp, err := ec2conn.CreateVolume(createVolume)
	if err != nil {
		err := fmt.Errorf("Error creating root volume: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Set the volume ID so we remember to delete it later
	s.volumeId = *createVolumeResp.VolumeId
	log.Printf("Volume ID: %s", s.volumeId)

	// Wait for the volume to become ready
	err = s.PollingConfig.WaitUntilVolumeAvailable(ctx, ec2conn, s.volumeId)
	if err != nil {
		err := fmt.Errorf("Error waiting for volume: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	state.Put("volume_id", s.volumeId)
	return multistep.ActionContinue
}

func (s *StepCreateVolume) Cleanup(state multistep.StateBag) {
	if s.volumeId == "" {
		return
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Deleting the created EBS volume...")
	_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: &s.volumeId})
	if err != nil {
		ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err))
	}
}

func (s *StepCreateVolume) buildCreateVolumeInput(az string, rootDevice *ec2.BlockDeviceMapping) (*ec2.CreateVolumeInput, error) {
	if rootDevice == nil {
		return nil, fmt.Errorf("Couldn't find root device!")
	}
	createVolumeInput := &ec2.CreateVolumeInput{
		AvailabilityZone: aws.String(az),
		Size: rootDevice.Ebs.VolumeSize,
		SnapshotId: rootDevice.Ebs.SnapshotId,
		VolumeType: rootDevice.Ebs.VolumeType,
		Iops: rootDevice.Ebs.Iops,
	}
	if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
		createVolumeInput.Size = aws.Int64(s.RootVolumeSize)
	}

	if s.RootVolumeType == "" || s.RootVolumeType == *rootDevice.Ebs.VolumeType {
		return createVolumeInput, nil
	}

	if s.RootVolumeType == "io1" {
		return nil, fmt.Errorf("Root volume type cannot be io1, because existing root volume type was %s", *rootDevice.Ebs.VolumeType)
	}

	createVolumeInput.VolumeType = aws.String(s.RootVolumeType)
	// non io1 cannot set iops
	createVolumeInput.Iops = nil

	return createVolumeInput, nil
}
74 builder/amazon/chroot/step_create_volume_test.go Normal file
@ -0,0 +1,74 @@
package chroot

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/stretchr/testify/assert"
)

func buildTestRootDevice() *ec2.BlockDeviceMapping {
	return &ec2.BlockDeviceMapping{
		Ebs: &ec2.EbsBlockDevice{
			VolumeSize: aws.Int64(10),
			SnapshotId: aws.String("snap-1234"),
			VolumeType: aws.String("gp2"),
		},
	}
}

func TestCreateVolume_Default(t *testing.T) {
	stepCreateVolume := new(StepCreateVolume)
	_, err := stepCreateVolume.buildCreateVolumeInput("test-az", buildTestRootDevice())
	assert.NoError(t, err)
}

func TestCreateVolume_Shrink(t *testing.T) {
	stepCreateVolume := StepCreateVolume{RootVolumeSize: 1}
	testRootDevice := buildTestRootDevice()
	ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
	assert.NoError(t, err)
	// Ensure that the new value is equal to the size of the old root device
	assert.Equal(t, *ret.Size, *testRootDevice.Ebs.VolumeSize)
}

func TestCreateVolume_Expand(t *testing.T) {
	stepCreateVolume := StepCreateVolume{RootVolumeSize: 25}
	testRootDevice := buildTestRootDevice()
	ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
	assert.NoError(t, err)
	// Ensure that the new value is equal to the size of the value passed in
	assert.Equal(t, *ret.Size, stepCreateVolume.RootVolumeSize)
}

func TestCreateVolume_io1_to_io1(t *testing.T) {
	stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
	testRootDevice := buildTestRootDevice()
	testRootDevice.Ebs.VolumeType = aws.String("io1")
	testRootDevice.Ebs.Iops = aws.Int64(1000)
	ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
	assert.NoError(t, err)
	assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
	assert.Equal(t, *ret.Iops, *testRootDevice.Ebs.Iops)
}

func TestCreateVolume_io1_to_gp2(t *testing.T) {
	stepCreateVolume := StepCreateVolume{RootVolumeType: "gp2"}
	testRootDevice := buildTestRootDevice()
	testRootDevice.Ebs.VolumeType = aws.String("io1")
	testRootDevice.Ebs.Iops = aws.Int64(1000)

	ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
	assert.NoError(t, err)
	assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
	assert.Nil(t, ret.Iops)
}

func TestCreateVolume_gp2_to_io1(t *testing.T) {
	stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
	testRootDevice := buildTestRootDevice()

	_, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
	assert.Error(t, err)
}
31 builder/amazon/chroot/step_early_unflock.go Normal file
@ -0,0 +1,31 @@
package chroot

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/packer-plugin-sdk/chroot"
)

// StepEarlyUnflock unlocks the flock.
type StepEarlyUnflock struct{}

func (s *StepEarlyUnflock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	cleanup := state.Get("flock_cleanup").(chroot.Cleanup)
	ui := state.Get("ui").(packer.Ui)

	log.Println("Unlocking file lock...")
	if err := cleanup.CleanupFunc(state); err != nil {
		err := fmt.Errorf("Error unlocking file lock: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *StepEarlyUnflock) Cleanup(state multistep.StateBag) {}
74 builder/amazon/chroot/step_flock.go Normal file
@ -0,0 +1,74 @@
package chroot

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// StepFlock acquires a file lock so that only one chroot build
// runs on the instance at a time.
//
// Produces:
//   flock_cleanup Cleanup - To perform early cleanup
type StepFlock struct {
	fh *os.File
}

func (s *StepFlock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)

	lockfile := "/var/lock/packer-chroot/lock"
	if err := os.MkdirAll(filepath.Dir(lockfile), 0755); err != nil {
		err := fmt.Errorf("Error creating lock: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	log.Printf("Obtaining lock: %s", lockfile)
	f, err := os.Create(lockfile)
	if err != nil {
		err := fmt.Errorf("Error creating lock: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// LOCK!
	if err := lockFile(f); err != nil {
		err := fmt.Errorf("Error obtaining lock: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Set the file handle, we can't close it because we need to hold
	// the lock.
	s.fh = f

	state.Put("flock_cleanup", s)
	return multistep.ActionContinue
}

func (s *StepFlock) Cleanup(state multistep.StateBag) {
	s.CleanupFunc(state)
}

func (s *StepFlock) CleanupFunc(state multistep.StateBag) error {
	if s.fh == nil {
		return nil
	}

	log.Printf("Unlocking: %s", s.fh.Name())
	if err := unlockFile(s.fh); err != nil {
		return err
	}

	s.fh = nil
	return nil
}
15 builder/amazon/chroot/step_flock_test.go Normal file
@ -0,0 +1,15 @@
package chroot

import (
	"testing"

	"github.com/hashicorp/packer/packer-plugin-sdk/chroot"
)

func TestFlockCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
	var raw interface{}
	raw = new(StepFlock)
	if _, ok := raw.(chroot.Cleanup); !ok {
		t.Fatalf("cleanup func should be a CleanupFunc")
	}
}
60 builder/amazon/chroot/step_instance_info.go Normal file
@ -0,0 +1,60 @@
package chroot

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// StepInstanceInfo verifies that this builder is running on an EC2 instance.
type StepInstanceInfo struct{}

func (s *StepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ec2conn := state.Get("ec2").(*ec2.EC2)
	session := state.Get("awsSession").(*session.Session)
	ui := state.Get("ui").(packer.Ui)

	// Get our own instance ID
	ui.Say("Gathering information about this EC2 instance...")

	ec2meta := ec2metadata.New(session)
	identity, err := ec2meta.GetInstanceIdentityDocument()
	if err != nil {
		err := fmt.Errorf(
			"Error retrieving the ID of the instance Packer is running on.\n" +
				"Please verify Packer is running on a proper AWS EC2 instance.")
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	log.Printf("Instance ID: %s", identity.InstanceID)

	// Query the entire instance metadata
	instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&identity.InstanceID}})
	if err != nil {
		err := fmt.Errorf("Error getting instance data: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(instancesResp.Reservations) == 0 {
		err := fmt.Errorf("Error getting instance data: no instance found.")
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	instance := instancesResp.Reservations[0].Instances[0]
	state.Put("instance", instance)

	return multistep.ActionContinue
}

func (s *StepInstanceInfo) Cleanup(multistep.StateBag) {}
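StepInstanceInfo leans on the EC2 instance metadata service: the identity-document lookup fails fast off-EC2, which is exactly the condition the step turns into a user-facing error. A hedged standalone sketch of that probe (assumes default credentials/region resolution):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	meta := ec2metadata.New(sess)
	// Errors when not running on an EC2 instance.
	identity, err := meta.GetInstanceIdentityDocument()
	if err != nil {
		log.Fatal("not running on an EC2 instance: ", err)
	}
	fmt.Println(identity.InstanceID, identity.Region)
}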
157 builder/amazon/chroot/step_mount_device.go Normal file
@ -0,0 +1,157 @@
package chroot

import (
	"bytes"
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/packer-plugin-sdk/common"
	"github.com/hashicorp/packer/packer-plugin-sdk/packerbuilderdata"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
)

type mountPathData struct {
	Device string
}

// StepMountDevice mounts the attached device.
//
// Produces:
//   mount_path string - The location where the volume was mounted.
//   mount_device_cleanup CleanupFunc - To perform early cleanup
type StepMountDevice struct {
	MountOptions []string
	MountPartition string

	mountPath string
	GeneratedData *packerbuilderdata.GeneratedData
}

func (s *StepMountDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ui := state.Get("ui").(packer.Ui)
	device := state.Get("device").(string)
	if config.NVMEDevicePath != "" {
		// customizable device path for mounting NVME block devices on c5 and m5 HVM
		device = config.NVMEDevicePath
	}
	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)

	var virtualizationType string
	if config.FromScratch || config.AMIVirtType != "" {
		virtualizationType = config.AMIVirtType
	} else {
		image := state.Get("source_image").(*ec2.Image)
		virtualizationType = *image.VirtualizationType
		log.Printf("Source image virtualization type is: %s", virtualizationType)
	}

	ictx := config.ctx

	ictx.Data = &mountPathData{Device: filepath.Base(device)}
	mountPath, err := interpolate.Render(config.MountPath, &ictx)

	if err != nil {
		err := fmt.Errorf("Error preparing mount directory: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	mountPath, err = filepath.Abs(mountPath)
	if err != nil {
		err := fmt.Errorf("Error preparing mount directory: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	log.Printf("Mount path: %s", mountPath)

	if err := os.MkdirAll(mountPath, 0755); err != nil {
		err := fmt.Errorf("Error creating mount directory: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	deviceMount := device

	if virtualizationType == "hvm" && s.MountPartition != "0" {
		deviceMount = fmt.Sprintf("%s%s", device, s.MountPartition)
	}
	state.Put("deviceMount", deviceMount)

	ui.Say("Mounting the root device...")
	stderr := new(bytes.Buffer)

	// build mount options from mount_options config, useful for nouuid options
	// or other specific device type settings for mount
	opts := ""
	if len(s.MountOptions) > 0 {
		opts = "-o " + strings.Join(s.MountOptions, " -o ")
	}
	mountCommand, err := wrappedCommand(
		fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath))
	if err != nil {
		err := fmt.Errorf("Error creating mount command: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
	cmd := common.ShellCommand(mountCommand)
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		err := fmt.Errorf(
			"Error mounting root volume: %s\nStderr: %s", err, stderr.String())
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Set the mount path so we remember to unmount it later
	s.mountPath = mountPath
	state.Put("mount_path", s.mountPath)
	s.GeneratedData.Put("MountPath", s.mountPath)
	state.Put("mount_device_cleanup", s)

	return multistep.ActionContinue
}

func (s *StepMountDevice) Cleanup(state multistep.StateBag) {
	ui := state.Get("ui").(packer.Ui)
	if err := s.CleanupFunc(state); err != nil {
		ui.Error(err.Error())
	}
}

func (s *StepMountDevice) CleanupFunc(state multistep.StateBag) error {
	if s.mountPath == "" {
		return nil
	}

	ui := state.Get("ui").(packer.Ui)
	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)

	ui.Say("Unmounting the root device...")
	unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", s.mountPath))
	if err != nil {
		return fmt.Errorf("Error creating unmount command: %s", err)
	}

	cmd := common.ShellCommand(unmountCommand)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("Error unmounting root device: %s", err)
	}

	s.mountPath = ""
	return nil
}
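The mount command is assembled as a plain shell string, with each entry of mount_options becoming its own -o flag before the device and path. A small standalone illustration of that expansion:

package main

import (
	"fmt"
	"strings"
)

func main() {
	mountOptions := []string{"nouuid", "ro"}
	opts := ""
	if len(mountOptions) > 0 {
		opts = "-o " + strings.Join(mountOptions, " -o ")
	}
	// Mirrors StepMountDevice: mount <opts> <device> <path>
	fmt.Println(fmt.Sprintf("mount %s %s %s", opts, "/dev/xvdf1", "/mnt/packer"))
	// Output: mount -o nouuid -o ro /dev/xvdf1 /mnt/packer
}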
15 builder/amazon/chroot/step_mount_device_test.go Normal file
@ -0,0 +1,15 @@
package chroot

import (
	"testing"

	"github.com/hashicorp/packer/packer-plugin-sdk/chroot"
)

func TestMountDeviceCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
	var raw interface{}
	raw = new(StepMountDevice)
	if _, ok := raw.(chroot.Cleanup); !ok {
		t.Fatalf("cleanup func should be a CleanupFunc")
	}
}
49 builder/amazon/chroot/step_prepare_device.go Normal file
@ -0,0 +1,49 @@
package chroot

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/packer-plugin-sdk/packerbuilderdata"
)

// StepPrepareDevice finds an available device and sets it.
type StepPrepareDevice struct {
	GeneratedData *packerbuilderdata.GeneratedData
}

func (s *StepPrepareDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ui := state.Get("ui").(packer.Ui)

	device := config.DevicePath
	if device == "" {
		var err error
		log.Println("Device path not specified, searching for available device...")
		device, err = AvailableDevice()
		if err != nil {
			err := fmt.Errorf("Error finding available device: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	if _, err := os.Stat(device); err == nil {
		err := fmt.Errorf("Device is in use: %s", device)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	log.Printf("Device: %s", device)
	state.Put("device", device)
	s.GeneratedData.Put("Device", device)
	return multistep.ActionContinue
}

func (s *StepPrepareDevice) Cleanup(state multistep.StateBag) {}
169 builder/amazon/chroot/step_register_ami.go Normal file
@ -0,0 +1,169 @@
package chroot

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	confighelper "github.com/hashicorp/packer/packer-plugin-sdk/config"
	"github.com/hashicorp/packer/packer-plugin-sdk/random"
)

// StepRegisterAMI creates the AMI.
type StepRegisterAMI struct {
	PollingConfig            *awscommon.AWSPollingConfig
	RootVolumeSize           int64
	EnableAMIENASupport      confighelper.Trilean
	EnableAMISriovNetSupport bool
	AMISkipBuildRegion       bool
}

func (s *StepRegisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ec2conn := state.Get("ec2").(*ec2.EC2)
	snapshotID := state.Get("snapshot_id").(string)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Registering the AMI...")

	var registerOpts *ec2.RegisterImageInput

	// Create the image
	amiName := config.AMIName
	state.Put("intermediary_image", false)
	if config.AMIEncryptBootVolume.True() || s.AMISkipBuildRegion {
		state.Put("intermediary_image", true)

		// From AWS SDK docs: You can encrypt a copy of an unencrypted snapshot,
		// but you cannot use it to create an unencrypted copy of an encrypted
		// snapshot. Your default CMK for EBS is used unless you specify a
		// non-default key using KmsKeyId.

		// If encrypt_boot is nil or true, we need to create a temporary image
		// so that in step_region_copy, we can copy it with the correct
		// encryption
		amiName = random.AlphaNum(7)
	}

	// Source Image is only required to be passed if the image is not from scratch
	if config.FromScratch {
		registerOpts = buildBaseRegisterOpts(config, nil, s.RootVolumeSize, snapshotID, amiName)
	} else {
		image := state.Get("source_image").(*ec2.Image)
		registerOpts = buildBaseRegisterOpts(config, image, s.RootVolumeSize, snapshotID, amiName)
	}

	if s.EnableAMISriovNetSupport {
		// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
		// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
		registerOpts.SriovNetSupport = aws.String("simple")
	}
	if s.EnableAMIENASupport.True() {
		// Set EnaSupport to true
		// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
		registerOpts.EnaSupport = aws.Bool(true)
	}

	registerResp, err := ec2conn.RegisterImage(registerOpts)
	if err != nil {
		state.Put("error", fmt.Errorf("Error registering AMI: %s", err))
		ui.Error(state.Get("error").(error).Error())
		return multistep.ActionHalt
	}

	// Set the AMI ID in the state
	ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
	amis := make(map[string]string)
	amis[*ec2conn.Config.Region] = *registerResp.ImageId
	state.Put("amis", amis)

	ui.Say("Waiting for AMI to become ready...")
	if err := s.PollingConfig.WaitUntilAMIAvailable(ctx, ec2conn, *registerResp.ImageId); err != nil {
		err := fmt.Errorf("Error waiting for AMI: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}

// Builds the base register opts with architecture, name, root block device, mappings, virtualization type
func buildBaseRegisterOpts(config *Config, sourceImage *ec2.Image, rootVolumeSize int64, snapshotID string, amiName string) *ec2.RegisterImageInput {
	var (
		mappings       []*ec2.BlockDeviceMapping
		rootDeviceName string
	)

	generatingNewBlockDeviceMappings := config.FromScratch || len(config.AMIMappings) > 0
	if generatingNewBlockDeviceMappings {
		mappings = config.AMIMappings.BuildEC2BlockDeviceMappings()
		rootDeviceName = config.RootDeviceName
	} else {
		// If config.FromScratch is false, source image must be set
		mappings = sourceImage.BlockDeviceMappings
		rootDeviceName = *sourceImage.RootDeviceName
	}

	newMappings := make([]*ec2.BlockDeviceMapping, len(mappings))
	for i, device := range mappings {
		newDevice := device
		if *newDevice.DeviceName == rootDeviceName {
			if newDevice.Ebs != nil {
				newDevice.Ebs.SnapshotId = aws.String(snapshotID)
			} else {
				newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotID)}
			}

			if generatingNewBlockDeviceMappings || rootVolumeSize > *newDevice.Ebs.VolumeSize {
				newDevice.Ebs.VolumeSize = aws.Int64(rootVolumeSize)
			}
		}

		// assume working from a snapshot, so we unset the Encrypted field if set,
		// otherwise AWS API will return InvalidParameter
		if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
			newDevice.Ebs.Encrypted = nil
		}

		newMappings[i] = newDevice
	}

	if config.FromScratch {
		return &ec2.RegisterImageInput{
			Name:                &amiName,
			Architecture:        aws.String(config.Architecture),
			RootDeviceName:      aws.String(rootDeviceName),
			VirtualizationType:  aws.String(config.AMIVirtType),
			BlockDeviceMappings: newMappings,
		}
	}

	return buildRegisterOptsFromExistingImage(config, sourceImage, newMappings, rootDeviceName, amiName)
}

func buildRegisterOptsFromExistingImage(config *Config, image *ec2.Image, mappings []*ec2.BlockDeviceMapping, rootDeviceName string, amiName string) *ec2.RegisterImageInput {
	registerOpts := &ec2.RegisterImageInput{
		Name:                &amiName,
		Architecture:        image.Architecture,
		RootDeviceName:      &rootDeviceName,
		BlockDeviceMappings: mappings,
		VirtualizationType:  image.VirtualizationType,
	}

	if config.AMIVirtType != "" {
		registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
	}

	if config.AMIVirtType != "hvm" {
		registerOpts.KernelId = image.KernelId
		registerOpts.RamdiskId = image.RamdiskId
	}
	return registerOpts
}
216  builder/amazon/chroot/step_register_ami_test.go  Normal file
@ -0,0 +1,216 @@
package chroot

import (
	"testing"

	amazon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/packer-plugin-sdk/common"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func testImage() ec2.Image {
	return ec2.Image{
		ImageId:      aws.String("ami-abcd1234"),
		Name:         aws.String("ami_test_name"),
		Architecture: aws.String("x86_64"),
		KernelId:     aws.String("aki-abcd1234"),
	}
}

func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
	config := Config{}
	config.AMIName = "test_ami_name"
	config.AMIDescription = "test_ami_description"
	config.AMIVirtType = "paravirtual"
	rootDeviceName := "foo"

	image := testImage()

	blockDevices := []*ec2.BlockDeviceMapping{}

	opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName, config.AMIName)

	expected := config.AMIVirtType
	if *opts.VirtualizationType != expected {
		t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
	}

	expected = config.AMIName
	if *opts.Name != expected {
		t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
	}

	expected = *image.KernelId
	if *opts.KernelId != expected {
		t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId)
	}

	expected = rootDeviceName
	if *opts.RootDeviceName != expected {
		t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
	}
}

func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
	config := Config{}
	config.AMIName = "test_ami_name"
	config.AMIDescription = "test_ami_description"
	config.AMIVirtType = "hvm"
	rootDeviceName := "foo"

	image := testImage()

	blockDevices := []*ec2.BlockDeviceMapping{}

	opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName, config.AMIName)

	expected := config.AMIVirtType
	if *opts.VirtualizationType != expected {
		t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
	}

	expected = config.AMIName
	if *opts.Name != expected {
		t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
	}

	if opts.KernelId != nil {
		t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId)
	}

	expected = rootDeviceName
	if *opts.RootDeviceName != expected {
		t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
	}
}

func TestStepRegisterAmi_buildRegisterOptsFromScratch(t *testing.T) {
	rootDeviceName := "/dev/sda"
	snapshotID := "foo"
	config := Config{
		FromScratch:  true,
		PackerConfig: common.PackerConfig{},
		AMIMappings: []amazon.BlockDevice{
			{
				DeviceName: rootDeviceName,
			},
		},
		RootDeviceName: rootDeviceName,
	}
	registerOpts := buildBaseRegisterOpts(&config, nil, 10, snapshotID, config.AMIName)

	if len(registerOpts.BlockDeviceMappings) != 1 {
		t.Fatal("Expected block device mapping of length 1")
	}

	if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotID {
		t.Fatalf("Snapshot ID of root disk not set to snapshot ID %s", snapshotID)
	}
}

func TestStepRegisterAmi_buildRegisterOptFromExistingImage(t *testing.T) {
	rootDeviceName := "/dev/sda"
	snapshotID := "foo"

	config := Config{
		FromScratch:  false,
		PackerConfig: common.PackerConfig{},
	}
	sourceImage := ec2.Image{
		RootDeviceName: &rootDeviceName,
		BlockDeviceMappings: []*ec2.BlockDeviceMapping{
			{
				DeviceName: &rootDeviceName,
				Ebs: &ec2.EbsBlockDevice{
					VolumeSize: aws.Int64(10),
				},
			},
			// Throw in an ephemeral device, it seems like all devices in the return struct in a source AMI have
			// a size, even if it's for ephemeral
			{
				DeviceName:  aws.String("/dev/sdb"),
				VirtualName: aws.String("ephemeral0"),
				Ebs: &ec2.EbsBlockDevice{
					VolumeSize: aws.Int64(0),
				},
			},
		},
	}
	registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotID, config.AMIName)

	if len(registerOpts.BlockDeviceMappings) != 2 {
		t.Fatal("Expected block device mapping of length 2")
	}

	for _, dev := range registerOpts.BlockDeviceMappings {
		if dev.Ebs.SnapshotId != nil && *dev.Ebs.SnapshotId == snapshotID {
			// Even though root volume size is in config, it isn't used, instead we use the root volume size
			// that's derived when we build the step
			if *dev.Ebs.VolumeSize != 15 {
				t.Fatalf("Root volume size not 15 GB instead %d", *dev.Ebs.VolumeSize)
			}
			return
		}
	}
	t.Fatalf("Could not find device with snapshot ID %s", snapshotID)
}

func TestStepRegisterAmi_buildRegisterOptFromExistingImageWithBlockDeviceMappings(t *testing.T) {
	const (
		rootDeviceName = "/dev/xvda"
		oldRootDevice  = "/dev/sda1"
	)
	snapshotId := "foo"

	config := Config{
		FromScratch:  false,
		PackerConfig: common.PackerConfig{},
		AMIMappings: []amazon.BlockDevice{
			{
				DeviceName: rootDeviceName,
			},
		},
		RootDeviceName: rootDeviceName,
	}

	// Intentionally try to use a different root device name
	sourceImage := ec2.Image{
		RootDeviceName: aws.String(oldRootDevice),
		BlockDeviceMappings: []*ec2.BlockDeviceMapping{
			{
				DeviceName: aws.String(oldRootDevice),
				Ebs: &ec2.EbsBlockDevice{
					VolumeSize: aws.Int64(10),
				},
			},
			// Throw in an ephemeral device, it seems like all devices in the return struct in a source AMI have
			// a size, even if it's for ephemeral
			{
				DeviceName:  aws.String("/dev/sdb"),
				VirtualName: aws.String("ephemeral0"),
				Ebs: &ec2.EbsBlockDevice{
					VolumeSize: aws.Int64(0),
				},
			},
		},
	}
	registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotId, config.AMIName)

	if len(registerOpts.BlockDeviceMappings) != 1 {
		t.Fatal("Expected block device mapping of length 1")
	}

	if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotId {
		t.Fatalf("Snapshot ID of root disk set to '%s' expected '%s'", *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId, snapshotId)
	}

	if *registerOpts.RootDeviceName != rootDeviceName {
		t.Fatalf("Root device set to '%s' expected %s", *registerOpts.RootDeviceName, rootDeviceName)
	}

	if *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize != 15 {
		t.Fatalf("Size of root disk not set to 15 GB, instead %d", *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize)
	}
}
82  builder/amazon/chroot/step_snapshot.go  Normal file
@ -0,0 +1,82 @@
package chroot

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/ec2"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

// StepSnapshot creates a snapshot of the created volume.
//
// Produces:
//   snapshot_id string - ID of the created snapshot
type StepSnapshot struct {
	PollingConfig *awscommon.AWSPollingConfig
	snapshotId    string
}

func (s *StepSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packer.Ui)
	volumeId := state.Get("volume_id").(string)

	ui.Say("Creating snapshot...")
	description := fmt.Sprintf("Packer: %s", time.Now().String())

	createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{
		VolumeId:    &volumeId,
		Description: &description,
	})
	if err != nil {
		err := fmt.Errorf("Error creating snapshot: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Set the snapshot ID so we can delete it later
	s.snapshotId = *createSnapResp.SnapshotId
	ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId))

	// Wait for the snapshot to be ready
	err = s.PollingConfig.WaitUntilSnapshotDone(ctx, ec2conn, s.snapshotId)
	if err != nil {
		err := fmt.Errorf("Error waiting for snapshot: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	state.Put("snapshot_id", s.snapshotId)

	snapshots := map[string][]string{
		*ec2conn.Config.Region: {s.snapshotId},
	}
	state.Put("snapshots", snapshots)

	return multistep.ActionContinue
}

func (s *StepSnapshot) Cleanup(state multistep.StateBag) {
	if s.snapshotId == "" {
		return
	}

	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)

	if cancelled || halted {
		ec2conn := state.Get("ec2").(*ec2.EC2)
		ui := state.Get("ui").(packer.Ui)
		ui.Say("Removing snapshot since we cancelled or halted...")
		_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &s.snapshotId})
		if err != nil {
			ui.Error(fmt.Sprintf("Error: %s", err))
		}
	}
}
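Note: downstream steps read the "snapshots" state entry as a map keyed by region. A minimal sketch of a hypothetical consumer, assuming the same state-bag conventions used above:

	// Hypothetical reader of the state populated by StepSnapshot.Run.
	snapshots := state.Get("snapshots").(map[string][]string)
	for region, ids := range snapshots {
		log.Printf("region %s has snapshot(s) %v", region, ids)
	}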
431  builder/amazon/common/access_config.go  Normal file
@ -0,0 +1,431 @@
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type VaultAWSEngineOptions,AssumeRoleConfig

package common

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	awsbase "github.com/hashicorp/aws-sdk-go-base"
	cleanhttp "github.com/hashicorp/go-cleanhttp"
	"github.com/hashicorp/packer/builder/amazon/common/awserrors"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
	vaultapi "github.com/hashicorp/vault/api"
)

// AssumeRoleConfig lets users set configuration options for assuming a special
// role when executing Packer.
//
// Usage example:
//
// HCL config example:
//
// ```HCL
// source "example" "amazon-ebs"{
// 	assume_role {
// 		role_arn     = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME"
// 		session_name = "SESSION_NAME"
// 		external_id  = "EXTERNAL_ID"
// 	}
// }
// ```
//
// JSON config example:
//
// ```json
// builder{
// 	"type": "amazon-ebs",
// 	"assume_role": {
// 		"role_arn"    : "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME",
// 		"session_name": "SESSION_NAME",
// 		"external_id" : "EXTERNAL_ID"
// 	}
// }
// ```
type AssumeRoleConfig struct {
	// Amazon Resource Name (ARN) of the IAM Role to assume.
	AssumeRoleARN string `mapstructure:"role_arn" required:"false"`
	// Number of seconds to restrict the assume role session duration.
	AssumeRoleDurationSeconds int `mapstructure:"duration_seconds" required:"false"`
	// The external ID to use when assuming the role. If omitted, no external
	// ID is passed to the AssumeRole call.
	AssumeRoleExternalID string `mapstructure:"external_id" required:"false"`
	// IAM Policy JSON describing further restricting permissions for the IAM
	// Role being assumed.
	AssumeRolePolicy string `mapstructure:"policy" required:"false"`
	// Set of Amazon Resource Names (ARNs) of IAM Policies describing further
	// restricting permissions for the IAM Role being assumed.
	AssumeRolePolicyARNs []string `mapstructure:"policy_arns" required:"false"`
	// Session name to use when assuming the role.
	AssumeRoleSessionName string `mapstructure:"session_name" required:"false"`
	// Map of assume role session tags.
	AssumeRoleTags map[string]string `mapstructure:"tags" required:"false"`
	// Set of assume role session tag keys to pass to any subsequent sessions.
	AssumeRoleTransitiveTagKeys []string `mapstructure:"transitive_tag_keys" required:"false"`
}

type VaultAWSEngineOptions struct {
	Name    string `mapstructure:"name"`
	RoleARN string `mapstructure:"role_arn"`
	// Specifies the TTL for the use of the STS token. This
	// is specified as a string with a duration suffix. Valid only when
	// credential_type is assumed_role or federation_token. When not
	// specified, the default_sts_ttl set for the role will be used. If that
	// is also not set, then the default value of 3600s will be used. AWS
	// places limits on the maximum TTL allowed. See the AWS documentation on
	// the DurationSeconds parameter for AssumeRole (for assumed_role
	// credential types) and GetFederationToken (for federation_token
	// credential types) for more details.
	TTL        string `mapstructure:"ttl" required:"false"`
	EngineName string `mapstructure:"engine_name"`
}

func (v *VaultAWSEngineOptions) Empty() bool {
	return len(v.Name) == 0 && len(v.RoleARN) == 0 &&
		len(v.EngineName) == 0 && len(v.TTL) == 0
}

// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
	// The access key used to communicate with AWS. [Learn how to set
	// this](/docs/builders/amazon#specifying-amazon-credentials). On EBS, this
	// is not required if you are using `use_vault_aws_engine` for
	// authentication instead.
	AccessKey string `mapstructure:"access_key" required:"true"`
	// If provided with a role ARN, Packer will attempt to assume this role
	// using the supplied credentials. See
	// [AssumeRoleConfig](#assume-role-configuration) below for more
	// details on all of the options available, and for a usage example.
	AssumeRole AssumeRoleConfig `mapstructure:"assume_role" required:"false"`
	// This option is useful if you use a cloud
	// provider whose API is compatible with AWS EC2. Specify another endpoint
	// like this: https://ec2.custom.endpoint.com.
	CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
	// Path to a credentials file to load credentials from
	CredsFilename string `mapstructure:"shared_credentials_file" required:"false"`
	// Enable automatic decoding of any encoded authorization (error) messages
	// using the `sts:DecodeAuthorizationMessage` API. Note: requires that the
	// effective user/role have permissions to `sts:DecodeAuthorizationMessage`
	// on resource `*`. Default `false`.
	DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
	// This allows skipping TLS
	// verification of the AWS EC2 endpoint. The default is false.
	InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
	// This is the maximum number of times an API call is retried, in the case
	// where requests are being throttled or experiencing transient failures.
	// The delay between the subsequent API calls increases exponentially.
	MaxRetries int `mapstructure:"max_retries" required:"false"`
	// The MFA
	// [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
	// code. This should probably be a user variable since it changes all the
	// time.
	MFACode string `mapstructure:"mfa_code" required:"false"`
	// The profile to use in the shared credentials file for
	// AWS. See Amazon's documentation on [specifying
	// profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
	// for more details.
	ProfileName string `mapstructure:"profile" required:"false"`
	// The name of the region, such as `us-east-1`, in which
	// to launch the EC2 instance to create the AMI.
	// When chroot building, this value is guessed from environment.
	RawRegion string `mapstructure:"region" required:"true"`
	// The secret key used to communicate with AWS. [Learn how to set
	// this](/docs/builders/amazon#specifying-amazon-credentials). This is not required
	// if you are using `use_vault_aws_engine` for authentication instead.
	SecretKey string `mapstructure:"secret_key" required:"true"`
	// Set to true if you want to skip
	// validation of the ami_regions configuration option. Default false.
	SkipValidation       bool `mapstructure:"skip_region_validation" required:"false"`
	SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
	// Set to true if you want to skip validating AWS credentials before runtime.
	SkipCredsValidation bool `mapstructure:"skip_credential_validation"`
	// The access token to use. This is different from the
	// access key and secret key. If you're not sure what this is, then you
	// probably don't need it. This will also be read from the AWS_SESSION_TOKEN
	// environmental variable.
	Token   string `mapstructure:"token" required:"false"`
	session *session.Session
	// Get credentials from Hashicorp Vault's aws secrets engine. You must
	// already have created a role to use. For more information about
	// generating credentials via the Vault engine, see the [Vault
	// docs.](https://www.vaultproject.io/api/secret/aws#generate-credentials)
	// If you set this flag, you must also set the below options:
	// - `name` (string) - Required. Specifies the name of the role to generate
	//   credentials against. This is part of the request URL.
	// - `engine_name` (string) - The name of the aws secrets engine. In the
	//   Vault docs, this is normally referred to as "aws", and Packer will
	//   default to "aws" if `engine_name` is not set.
	// - `role_arn` (string)- The ARN of the role to assume if credential\_type
	//   on the Vault role is assumed\_role. Must match one of the allowed role
	//   ARNs in the Vault role. Optional if the Vault role only allows a single
	//   AWS role ARN; required otherwise.
	// - `ttl` (string) - Specifies the TTL for the use of the STS token. This
	//   is specified as a string with a duration suffix. Valid only when
	//   credential\_type is assumed\_role or federation\_token. When not
	//   specified, the default\_sts\_ttl set for the role will be used. If that
	//   is also not set, then the default value of 3600s will be used. AWS
	//   places limits on the maximum TTL allowed. See the AWS documentation on
	//   the DurationSeconds parameter for AssumeRole (for assumed\_role
	//   credential types) and GetFederationToken (for federation\_token
	//   credential types) for more details.
	//
	// JSON example:
	//
	// ```json
	// {
	// 	"vault_aws_engine": {
	// 		"name": "myrole",
	// 		"role_arn": "myarn",
	// 		"ttl": "3600s"
	// 	}
	// }
	// ```
	//
	// HCL2 example:
	//
	// ```hcl
	// vault_aws_engine {
	// 	name     = "myrole"
	// 	role_arn = "myarn"
	// 	ttl      = "3600s"
	// }
	// ```
	VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
	// [Polling configuration](#polling-configuration) for the AWS waiter. Configures the waiter that checks
	// resource state.
	PollingConfig *AWSPollingConfig `mapstructure:"aws_polling" required:"false"`

	getEC2Connection func() ec2iface.EC2API
}

// Session returns a valid session.Session object for access to AWS services, or
// an error if the authentication and region couldn't be resolved
func (c *AccessConfig) Session() (*session.Session, error) {
	if c.session != nil {
		return c.session, nil
	}

	// Create new AWS config
	config := aws.NewConfig().WithCredentialsChainVerboseErrors(true)
	if c.MaxRetries > 0 {
		config = config.WithMaxRetries(c.MaxRetries)
	}

	// Set AWS config defaults.
	if c.RawRegion != "" {
		config = config.WithRegion(c.RawRegion)
	}

	if c.CustomEndpointEc2 != "" {
		config = config.WithEndpoint(c.CustomEndpointEc2)
	}

	config = config.WithHTTPClient(cleanhttp.DefaultClient())
	transport := config.HTTPClient.Transport.(*http.Transport)
	if c.InsecureSkipTLSVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	transport.Proxy = http.ProxyFromEnvironment

	// Figure out which possible credential providers are valid; test that we
	// can get credentials via the selected providers, and set the providers in
	// the config.
	creds, err := c.GetCredentials(config)
	if err != nil {
		return nil, err
	}
	config.WithCredentials(creds)

	// Create session options based on our AWS config
	opts := session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Config:            *config,
	}

	if c.ProfileName != "" {
		opts.Profile = c.ProfileName
	}

	if c.MFACode != "" {
		opts.AssumeRoleTokenProvider = func() (string, error) {
			return c.MFACode, nil
		}
	}

	sess, err := session.NewSessionWithOptions(opts)
	if err != nil {
		return nil, err
	}
	log.Printf("Found region %s", *sess.Config.Region)
	c.session = sess

	cp, err := c.session.Config.Credentials.Get()

	if awserrors.Matches(err, "NoCredentialProviders", "") {
		return nil, c.NewNoValidCredentialSourcesError(err)
	}

	if err != nil {
		return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
	}

	log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)

	if c.DecodeAuthZMessages {
		DecodeAuthZMessages(c.session)
	}

	return c.session, nil
}

func (c *AccessConfig) SessionRegion() string {
	if c.session == nil {
		panic("access config session should be set.")
	}
	return aws.StringValue(c.session.Config.Region)
}

func (c *AccessConfig) IsGovCloud() bool {
	return strings.HasPrefix(c.SessionRegion(), "us-gov-")
}

func (c *AccessConfig) IsChinaCloud() bool {
	return strings.HasPrefix(c.SessionRegion(), "cn-")
}

// GetCredentials gets credentials from the environment, shared credentials,
// the session (which may include a credential process), or ECS/EC2 metadata
// endpoints. GetCredentials also validates the credentials and the ability to
// assume a role or will return an error if unsuccessful.
func (c *AccessConfig) GetCredentials(config *aws.Config) (*awsCredentials.Credentials, error) {
	// Reload values into the config used by the Packer-Terraform shared SDK
	awsbaseConfig := &awsbase.Config{
		AccessKey:                   c.AccessKey,
		AssumeRoleARN:               c.AssumeRole.AssumeRoleARN,
		AssumeRoleDurationSeconds:   c.AssumeRole.AssumeRoleDurationSeconds,
		AssumeRoleExternalID:        c.AssumeRole.AssumeRoleExternalID,
		AssumeRolePolicy:            c.AssumeRole.AssumeRolePolicy,
		AssumeRolePolicyARNs:        c.AssumeRole.AssumeRolePolicyARNs,
		AssumeRoleSessionName:       c.AssumeRole.AssumeRoleSessionName,
		AssumeRoleTags:              c.AssumeRole.AssumeRoleTags,
		AssumeRoleTransitiveTagKeys: c.AssumeRole.AssumeRoleTransitiveTagKeys,
		CredsFilename:               c.CredsFilename,
		DebugLogging:                false,
		// TODO: implement for Packer
		// IamEndpoint:              c.Endpoints["iam"],
		Insecure:             c.InsecureSkipTLSVerify,
		MaxRetries:           c.MaxRetries,
		Profile:              c.ProfileName,
		Region:               c.RawRegion,
		SecretKey:            c.SecretKey,
		SkipCredsValidation:  c.SkipCredsValidation,
		SkipMetadataApiCheck: c.SkipMetadataApiCheck,
		// TODO: implement for Packer
		// SkipRequestingAccountId: c.SkipRequestingAccountId,
		// StsEndpoint:             c.Endpoints["sts"],
		Token: c.Token,
	}

	return awsbase.GetCredentials(awsbaseConfig)
}

func (c *AccessConfig) GetCredsFromVault() error {
	// const EnvVaultAddress = "VAULT_ADDR"
	// const EnvVaultToken = "VAULT_TOKEN"
	vaultConfig := vaultapi.DefaultConfig()
	cli, err := vaultapi.NewClient(vaultConfig)
	if err != nil {
		return fmt.Errorf("Error getting Vault client: %s", err)
	}
	if c.VaultAWSEngine.EngineName == "" {
		c.VaultAWSEngine.EngineName = "aws"
	}
	path := fmt.Sprintf("/%s/creds/%s", c.VaultAWSEngine.EngineName,
		c.VaultAWSEngine.Name)
	secret, err := cli.Logical().Read(path)
	if err != nil {
		return fmt.Errorf("Error reading vault secret: %s", err)
	}
	if secret == nil {
		return fmt.Errorf("Vault Secret does not exist at the given path.")
	}

	c.AccessKey = secret.Data["access_key"].(string)
	c.SecretKey = secret.Data["secret_key"].(string)
	token := secret.Data["security_token"]
	if token != nil {
		c.Token = token.(string)
	} else {
		c.Token = ""
	}

	return nil
}

func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
	var errs []error

	if c.SkipMetadataApiCheck {
		log.Println("(WARN) skip_metadata_api_check ignored.")
	}

	// Make sure it's obvious from the config how we're getting credentials:
	// Vault, Packer config, or environment.
	if !c.VaultAWSEngine.Empty() {
		if len(c.AccessKey) > 0 {
			errs = append(errs,
				fmt.Errorf("If you have set vault_aws_engine, you must not set"+
					" the access_key or secret_key."))
		}
		// Go ahead and grab those credentials from Vault now, so we can set
		// the keys and token now.
		err := c.GetCredsFromVault()
		if err != nil {
			errs = append(errs, err)
		}
	}

	if (len(c.AccessKey) > 0) != (len(c.SecretKey) > 0) {
		errs = append(errs,
			fmt.Errorf("`access_key` and `secret_key` must both be either set or not set."))
	}

	if c.PollingConfig == nil {
		c.PollingConfig = new(AWSPollingConfig)
	}
	c.PollingConfig.LogEnvOverrideWarnings()

	return errs
}

func (c *AccessConfig) NewNoValidCredentialSourcesError(err error) error {
	return fmt.Errorf("No valid credential sources found for AWS Builder. "+
		"Please see https://www.packer.io/docs/builders/amazon#authentication "+
		"for more information on providing credentials for the AWS Builder. "+
		"Error: %w", err)
}

func (c *AccessConfig) NewEC2Connection() (ec2iface.EC2API, error) {
	if c.getEC2Connection != nil {
		return c.getEC2Connection(), nil
	}
	sess, err := c.Session()
	if err != nil {
		return nil, err
	}

	ec2conn := ec2.New(sess)

	return ec2conn, nil
}
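Note: for a sense of how this type is consumed, a minimal hypothetical sketch of building an EC2 client from an AccessConfig; the region and role ARN values are placeholders, not part of this diff:

	// Hypothetical usage; values are placeholders.
	cfg := &AccessConfig{
		RawRegion: "us-east-1",
		AssumeRole: AssumeRoleConfig{
			AssumeRoleARN:         "arn:aws:iam::123456789012:role/packer-build",
			AssumeRoleSessionName: "packer",
		},
	}
	if errs := cfg.Prepare(nil); len(errs) > 0 {
		log.Fatal(errs)
	}
	ec2conn, err := cfg.NewEC2Connection()
	if err != nil {
		log.Fatal(err)
	}
	_ = ec2conn // ready for DescribeRegions, RegisterImage, etc.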
73  builder/amazon/common/access_config.hcl2spec.go  Normal file
@ -0,0 +1,73 @@
// Code generated by "mapstructure-to-hcl2 -type VaultAWSEngineOptions,AssumeRoleConfig"; DO NOT EDIT.
package common

import (
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/zclconf/go-cty/cty"
)

// FlatAssumeRoleConfig is an auto-generated flat version of AssumeRoleConfig.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatAssumeRoleConfig struct {
	AssumeRoleARN               *string           `mapstructure:"role_arn" required:"false" cty:"role_arn" hcl:"role_arn"`
	AssumeRoleDurationSeconds   *int              `mapstructure:"duration_seconds" required:"false" cty:"duration_seconds" hcl:"duration_seconds"`
	AssumeRoleExternalID        *string           `mapstructure:"external_id" required:"false" cty:"external_id" hcl:"external_id"`
	AssumeRolePolicy            *string           `mapstructure:"policy" required:"false" cty:"policy" hcl:"policy"`
	AssumeRolePolicyARNs        []string          `mapstructure:"policy_arns" required:"false" cty:"policy_arns" hcl:"policy_arns"`
	AssumeRoleSessionName       *string           `mapstructure:"session_name" required:"false" cty:"session_name" hcl:"session_name"`
	AssumeRoleTags              map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
	AssumeRoleTransitiveTagKeys []string          `mapstructure:"transitive_tag_keys" required:"false" cty:"transitive_tag_keys" hcl:"transitive_tag_keys"`
}

// FlatMapstructure returns a new FlatAssumeRoleConfig.
// FlatAssumeRoleConfig is an auto-generated flat version of AssumeRoleConfig.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
func (*AssumeRoleConfig) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatAssumeRoleConfig)
}

// HCL2Spec returns the hcl spec of a AssumeRoleConfig.
// This spec is used by HCL to read the fields of AssumeRoleConfig.
// The decoded values from this spec will then be applied to a FlatAssumeRoleConfig.
func (*FlatAssumeRoleConfig) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"role_arn":            &hcldec.AttrSpec{Name: "role_arn", Type: cty.String, Required: false},
		"duration_seconds":    &hcldec.AttrSpec{Name: "duration_seconds", Type: cty.Number, Required: false},
		"external_id":         &hcldec.AttrSpec{Name: "external_id", Type: cty.String, Required: false},
		"policy":              &hcldec.AttrSpec{Name: "policy", Type: cty.String, Required: false},
		"policy_arns":         &hcldec.AttrSpec{Name: "policy_arns", Type: cty.List(cty.String), Required: false},
		"session_name":        &hcldec.AttrSpec{Name: "session_name", Type: cty.String, Required: false},
		"tags":                &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
		"transitive_tag_keys": &hcldec.AttrSpec{Name: "transitive_tag_keys", Type: cty.List(cty.String), Required: false},
	}
	return s
}

// FlatVaultAWSEngineOptions is an auto-generated flat version of VaultAWSEngineOptions.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatVaultAWSEngineOptions struct {
	Name       *string `mapstructure:"name" cty:"name" hcl:"name"`
	RoleARN    *string `mapstructure:"role_arn" cty:"role_arn" hcl:"role_arn"`
	TTL        *string `mapstructure:"ttl" required:"false" cty:"ttl" hcl:"ttl"`
	EngineName *string `mapstructure:"engine_name" cty:"engine_name" hcl:"engine_name"`
}

// FlatMapstructure returns a new FlatVaultAWSEngineOptions.
// FlatVaultAWSEngineOptions is an auto-generated flat version of VaultAWSEngineOptions.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
func (*VaultAWSEngineOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatVaultAWSEngineOptions)
}

// HCL2Spec returns the hcl spec of a VaultAWSEngineOptions.
// This spec is used by HCL to read the fields of VaultAWSEngineOptions.
// The decoded values from this spec will then be applied to a FlatVaultAWSEngineOptions.
func (*FlatVaultAWSEngineOptions) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"name":        &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false},
		"role_arn":    &hcldec.AttrSpec{Name: "role_arn", Type: cty.String, Required: false},
		"ttl":         &hcldec.AttrSpec{Name: "ttl", Type: cty.String, Required: false},
		"engine_name": &hcldec.AttrSpec{Name: "engine_name", Type: cty.String, Required: false},
	}
	return s
}
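Note: to make the generated spec concrete, a small hypothetical sketch of decoding an HCL body with it. The hclparse usage is an assumption about how callers typically drive hcldec, not something in this diff:

	// Hypothetical: decode an assume_role body using the generated spec.
	parser := hclparse.NewParser()
	f, diags := parser.ParseHCL([]byte(`role_arn = "arn:aws:iam::123456789012:role/packer-build"`), "example.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	spec := hcldec.ObjectSpec((&FlatAssumeRoleConfig{}).HCL2Spec())
	val, diags := hcldec.Decode(f.Body, spec, nil)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	log.Println(val.GetAttr("role_arn").AsString())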
71  builder/amazon/common/access_config_test.go  Normal file
@ -0,0 +1,71 @@
package common

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

func testAccessConfig() *AccessConfig {
	return &AccessConfig{
		getEC2Connection: func() ec2iface.EC2API {
			return &mockEC2Client{}
		},
		PollingConfig: new(AWSPollingConfig),
	}
}

func TestAccessConfigPrepare_Region(t *testing.T) {
	c := testAccessConfig()

	c.RawRegion = "us-east-12"
	err := c.ValidateRegion(c.RawRegion)
	if err == nil {
		t.Fatalf("should have region validation err: %s", c.RawRegion)
	}

	c.RawRegion = "us-east-1"
	err = c.ValidateRegion(c.RawRegion)
	if err != nil {
		t.Fatalf("shouldn't have region validation err: %s", c.RawRegion)
	}

	c.RawRegion = "custom"
	err = c.ValidateRegion(c.RawRegion)
	if err == nil {
		t.Fatalf("should have region validation err: %s", c.RawRegion)
	}

	c.RawRegion = "custom"
	c.SkipValidation = true
	// testing whole prepare func here; this is checking that validation is
	// skipped, so we don't need a mock connection
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.SkipValidation = false
	c.RawRegion = ""
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}
}

func TestAccessConfigPrepare_RegionRestricted(t *testing.T) {
	c := testAccessConfig()

	// Create a Session with a custom region
	c.session = session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-gov-west-1"),
	}))

	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	if !c.IsGovCloud() {
		t.Fatal("We should be in gov region.")
	}
}
310  builder/amazon/common/ami_config.go  Normal file
@ -0,0 +1,310 @@
|
||||
//go:generate struct-markdown
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/packer/hcl2template"
|
||||
"github.com/hashicorp/packer/packer-plugin-sdk/config"
|
||||
"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// AMIConfig is for common configuration related to creating AMIs.
|
||||
type AMIConfig struct {
|
||||
// The name of the resulting AMI that will appear when managing AMIs in the
|
||||
// AWS console or via APIs. This must be unique. To help make this unique,
|
||||
// use a function like timestamp (see [template
|
||||
// engine](/docs/templates/engine) for more info).
|
||||
AMIName string `mapstructure:"ami_name" required:"true"`
|
||||
// The description to set for the resulting
|
||||
// AMI(s). By default this description is empty. This is a
|
||||
// [template engine](/docs/templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
AMIDescription string `mapstructure:"ami_description" required:"false"`
|
||||
// The type of virtualization for the AMI
|
||||
// you are building. This option is required to register HVM images. Can be
|
||||
// paravirtual (default) or hvm.
|
||||
AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
|
||||
// A list of account IDs that have access to
|
||||
// launch the resulting AMI(s). By default no additional users other than the
|
||||
// user creating the AMI has permissions to launch it.
|
||||
AMIUsers []string `mapstructure:"ami_users" required:"false"`
|
||||
// A list of groups that have access to
|
||||
// launch the resulting AMI(s). By default no groups have permission to launch
|
||||
// the AMI. all will make the AMI publicly accessible.
|
||||
AMIGroups []string `mapstructure:"ami_groups" required:"false"`
|
||||
// A list of product codes to
|
||||
// associate with the AMI. By default no product codes are associated with the
|
||||
// AMI.
|
||||
AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
|
||||
// A list of regions to copy the AMI to.
|
||||
// Tags and attributes are copied along with the AMI. AMI copying takes time
|
||||
// depending on the size of the AMI, but will generally take many minutes.
|
||||
AMIRegions []string `mapstructure:"ami_regions" required:"false"`
|
||||
// Set to true if you want to skip
|
||||
// validation of the ami_regions configuration option. Default false.
|
||||
AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
|
||||
// Key/value pair tags applied to the AMI. This is a [template
|
||||
// engine](/docs/templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
AMITags map[string]string `mapstructure:"tags" required:"false"`
|
||||
// Same as [`tags`](#tags) but defined as a singular repeatable block
|
||||
// containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
|
||||
// will allow you to create those programatically.
|
||||
AMITag hcl2template.KeyValues `mapstructure:"tag" required:"false"`
|
||||
// Enable enhanced networking (ENA but not SriovNetSupport) on
|
||||
// HVM-compatible AMIs. If set, add `ec2:ModifyInstanceAttribute` to your
|
||||
// AWS IAM policy.
|
||||
//
|
||||
// Note: you must make sure enhanced networking is enabled on your
|
||||
// instance. See [Amazon's documentation on enabling enhanced
|
||||
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
|
||||
AMIENASupport config.Trilean `mapstructure:"ena_support" required:"false"`
|
||||
// Enable enhanced networking (SriovNetSupport but not ENA) on
|
||||
// HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
|
||||
// AWS IAM policy. Note: you must make sure enhanced networking is enabled
|
||||
// on your instance. See [Amazon's documentation on enabling enhanced
|
||||
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
|
||||
// Default `false`.
|
||||
AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
|
||||
// Force Packer to first deregister an existing
|
||||
// AMI if one with the same name already exists. Default false.
|
||||
AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
|
||||
// Force Packer to delete snapshots
|
||||
// associated with AMIs, which have been deregistered by force_deregister.
|
||||
// Default false.
|
||||
AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
|
||||
// Whether or not to encrypt the resulting AMI when
|
||||
// copying a provisioned instance to an AMI. By default, Packer will keep
|
||||
// the encryption setting to what it was in the source image. Setting false
|
||||
// will result in an unencrypted image, and true will result in an encrypted
|
||||
// one.
|
||||
//
|
||||
// If you have used the `launch_block_device_mappings` to set an encryption
|
||||
// key and that key is the same as the one you want the image encrypted with
|
||||
// at the end, then you don't need to set this field; leaving it empty will
|
||||
// prevent an unnecessary extra copy step and save you some time.
|
||||
AMIEncryptBootVolume config.Trilean `mapstructure:"encrypt_boot" required:"false"`
|
||||
// ID, alias or ARN of the KMS key to use for AMI encryption. This
|
||||
// only applies to the main `region` -- any regions the AMI gets copied to
|
||||
// copied will be encrypted by the default EBS KMS key for that region,
|
||||
// unless you set region-specific keys in AMIRegionKMSKeyIDs.
|
||||
//
|
||||
// Set this value if you select `encrypt_boot`, but don't want to use the
|
||||
// region's default KMS key.
|
||||
//
|
||||
// If you have a custom kms key you'd like to apply to the launch volume,
|
||||
// and are only building in one region, it is more efficient to leave this
|
||||
// and `encrypt_boot` empty and to instead set the key id in the
|
||||
// launch_block_device_mappings (you can find an example below). This saves
|
||||
// potentially many minutes at the end of the build by preventing Packer
|
||||
// from having to copy and re-encrypt the image at the end of the build.
|
||||
//
|
||||
// For valid formats see *KmsKeyId* in the [AWS API docs -
|
||||
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
|
||||
// This field is validated by Packer, when using an alias, you will have to
|
||||
// prefix `kms_key_id` with `alias/`.
|
||||
AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
|
||||
// regions to copy the ami to, along with the custom kms key id (alias or
|
||||
// arn) to use for encryption for that region. Keys must match the regions
|
||||
// provided in `ami_regions`. If you just want to encrypt using a default
|
||||
// ID, you can stick with `kms_key_id` and `ami_regions`. If you want a
|
||||
// region to be encrypted with that region's default key ID, you can use an
|
||||
// empty string `""` instead of a key id in this map. (e.g. `"us-east-1":
|
||||
// ""`) However, you cannot use default key IDs if you are using this in
|
||||
// conjunction with `snapshot_users` -- in that situation you must use
|
||||
// custom keys. For valid formats see *KmsKeyId* in the [AWS API docs -
|
||||
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
|
||||
//
|
||||
// This option supercedes the `kms_key_id` option -- if you set both, and
|
||||
// they are different, Packer will respect the value in
|
||||
// `region_kms_key_ids` for your build region and silently disregard the
|
||||
// value provided in `kms_key_id`.
|
||||
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
|
||||
// If true, Packer will not check whether an AMI with the `ami_name` exists
|
||||
// in the region it is building in. It will use an intermediary AMI name,
|
||||
// which it will not convert to an AMI in the build region. It will copy
|
||||
// the intermediary AMI into any regions provided in `ami_regions`, then
|
||||
// delete the intermediary AMI. Default `false`.
|
||||
AMISkipBuildRegion bool `mapstructure:"skip_save_build_region"`
|
||||
// Key/value pair tags to apply to snapshot. They will override AMI tags if
|
||||
// already applied to snapshot. This is a [template
|
||||
// engine](/docs/templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false"`
|
||||
// Same as [`snapshot_tags`](#snapshot_tags) but defined as a singular
|
||||
// repeatable block containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
|
||||
// will allow you to create those programatically.
|
||||
SnapshotTag hcl2template.KeyValues `mapstructure:"snapshot_tag" required:"false"`
|
||||
// A list of account IDs that have
|
||||
// access to create volumes from the snapshot(s). By default no additional
|
||||
// users other than the user creating the AMI has permissions to create
|
||||
// volumes from the backing snapshot(s).
|
||||
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
|
||||
// A list of groups that have access to
|
||||
// create volumes from the snapshot(s). By default no groups have permission
|
||||
// to create volumes from the snapshot(s). all will make the snapshot
|
||||
// publicly accessible.
|
||||
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
|
||||
}
|
||||
|
||||
func stringInSlice(s []string, searchstr string) bool {
|
||||
for _, item := range s {
|
||||
if item == searchstr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
|
||||
errs = append(errs, c.SnapshotTag.CopyOn(&c.SnapshotTags)...)
|
||||
errs = append(errs, c.AMITag.CopyOn(&c.AMITags)...)
|
||||
|
||||
if c.AMIName == "" {
|
||||
errs = append(errs, fmt.Errorf("ami_name must be specified"))
|
||||
}
|
||||
|
||||
// Make sure that if we have region_kms_key_ids defined,
|
||||
// the regions in region_kms_key_ids are also in ami_regions
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for kmsKeyRegion := range c.AMIRegionKMSKeyIDs {
|
||||
if !stringInSlice(c.AMIRegions, kmsKeyRegion) {
|
||||
errs = append(errs, fmt.Errorf("Region %s is in region_kms_key_ids but not in ami_regions", kmsKeyRegion))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errs = append(errs, c.prepareRegions(accessConfig)...)
|
||||
|
||||
// Prevent sharing of default KMS key encrypted volumes with other aws users
|
||||
if len(c.AMIUsers) > 0 {
|
||||
if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key"))
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) == 0 {
|
||||
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key for other regions"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
kmsKeys := make([]string, 0)
|
||||
if len(c.AMIKmsKeyId) > 0 {
|
||||
kmsKeys = append(kmsKeys, c.AMIKmsKeyId)
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) > 0 {
|
||||
kmsKeys = append(kmsKeys, kmsKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(kmsKeys) > 0 && !c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("If you have set either "+
|
||||
"region_kms_key_ids or kms_key_id, encrypt_boot must also be true."))
|
||||
|
||||
}
|
||||
for _, kmsKey := range kmsKeys {
|
||||
if !validateKmsKey(kmsKey) {
|
||||
errs = append(errs, fmt.Errorf("%q is not a valid KMS Key Id.", kmsKey))
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.SnapshotUsers) > 0 {
|
||||
if len(c.AMIKmsKeyId) == 0 && len(c.AMIRegionKMSKeyIDs) == 0 && c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted "+
|
||||
"with default KMS key, see https://www.packer.io/docs/builders/amazon-ebs#region_kms_key_ids for more information"))
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) == 0 {
|
||||
errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted with default KMS key"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.AMIName) < 3 || len(c.AMIName) > 128 {
|
||||
errs = append(errs, fmt.Errorf("ami_name must be between 3 and 128 characters long"))
|
||||
}
|
||||
|
||||
if c.AMIName != templateCleanAMIName(c.AMIName) {
|
||||
errs = append(errs, fmt.Errorf("AMIName should only contain "+
|
||||
"alphanumeric characters, parentheses (()), square brackets ([]), spaces "+
|
||||
"( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs "+
|
||||
"(@), or underscores(_). You can use the `clean_resource_name` template "+
|
||||
"filter to automatically clean your ami name."))
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
return nil
|
||||
}
func (c *AMIConfig) prepareRegions(accessConfig *AccessConfig) (errs []error) {
	if len(c.AMIRegions) > 0 {
		regionSet := make(map[string]struct{})
		regions := make([]string, 0, len(c.AMIRegions))

		for _, region := range c.AMIRegions {
			// If we already saw the region, then don't look again
			if _, ok := regionSet[region]; ok {
				continue
			}

			// Mark that we saw the region
			regionSet[region] = struct{}{}

			// Make sure that if we have region_kms_key_ids defined,
			// the regions in ami_regions are also in region_kms_key_ids
			if len(c.AMIRegionKMSKeyIDs) > 0 {
				if _, ok := c.AMIRegionKMSKeyIDs[region]; !ok {
					errs = append(errs, fmt.Errorf("Region %s is in ami_regions but not in region_kms_key_ids", region))
				}
			}
			if (accessConfig != nil) && (region == accessConfig.RawRegion) {
				// Make sure we don't try to copy to the region we
				// originally created the AMI in.
				log.Printf("Cannot copy AMI to AWS session region '%s', deleting it from `ami_regions`.", region)
				continue
			}
			regions = append(regions, region)
		}

		c.AMIRegions = regions
	}
	return errs
}
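prepareRegions thus does three things in one pass: de-duplicates ami_regions, checks that every listed region has an entry in region_kms_key_ids when that map is set, and silently drops the session's own region, since the AMI already exists there. A standalone sketch of the pruning behavior (the pruneRegions helper is hypothetical, mirroring the loop above):

package main

import "fmt"

// pruneRegions mirrors the prepareRegions loop: skip regions we have
// already seen, and skip the region the AMI is being built in.
func pruneRegions(amiRegions []string, homeRegion string) []string {
	seen := make(map[string]struct{})
	out := make([]string, 0, len(amiRegions))
	for _, r := range amiRegions {
		if _, ok := seen[r]; ok {
			continue
		}
		seen[r] = struct{}{}
		if r == homeRegion {
			continue
		}
		out = append(out, r)
	}
	return out
}

func main() {
	// The duplicate us-east-1 is collapsed, and the home region is removed.
	fmt.Println(pruneRegions([]string{"us-east-1", "us-west-1", "us-east-1"}, "us-east-1"))
	// Output: [us-west-1]
}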
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html
func validateKmsKey(kmsKey string) (valid bool) {
	kmsKeyIdPattern := `[a-f0-9-]+$`
	aliasPattern := `alias/[a-zA-Z0-9:/_-]+$`
	kmsArnStartPattern := `^arn:aws(-us-gov)?:kms:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12}):`
	if regexp.MustCompile(fmt.Sprintf("^%s", kmsKeyIdPattern)).MatchString(kmsKey) {
		return true
	}
	if regexp.MustCompile(fmt.Sprintf("^%s", aliasPattern)).MatchString(kmsKey) {
		return true
	}
	if regexp.MustCompile(fmt.Sprintf("%skey/%s", kmsArnStartPattern, kmsKeyIdPattern)).MatchString(kmsKey) {
		return true
	}
	if regexp.MustCompile(fmt.Sprintf("%s%s", kmsArnStartPattern, aliasPattern)).MatchString(kmsKey) {
		return true
	}
	return false
}
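validateKmsKey accepts four shapes: a bare lowercase-hex key ID, a key alias, and the full ARN form of each (including GovCloud partitions and region-less ARNs). A quick standalone check against those shapes (regexes copied from above; the sample keys are borrowed from the tests below):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as validateKmsKey above.
	kmsKeyIdPattern := `[a-f0-9-]+$`
	aliasPattern := `alias/[a-zA-Z0-9:/_-]+$`
	kmsArnStartPattern := `^arn:aws(-us-gov)?:kms:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12}):`

	patterns := []*regexp.Regexp{
		regexp.MustCompile("^" + kmsKeyIdPattern),                         // bare key ID
		regexp.MustCompile("^" + aliasPattern),                            // alias
		regexp.MustCompile(kmsArnStartPattern + "key/" + kmsKeyIdPattern), // key ARN
		regexp.MustCompile(kmsArnStartPattern + aliasPattern),             // alias ARN
	}

	for _, key := range []string{
		"abcd1234-e567-890f-a12b-a123b4cd56ef",             // valid: bare key ID
		"alias/foo/bar",                                    // valid: alias
		"arn:aws:kms:us-east-1:012345678910:alias/foo/bar", // valid: alias ARN
		"ABCD1234-e567-890f-a12b-a123b4cd56ef",             // invalid: uppercase hex
	} {
		valid := false
		for _, p := range patterns {
			if p.MatchString(key) {
				valid = true
				break
			}
		}
		fmt.Printf("%-50s %t\n", key, valid)
	}
}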
249 builder/amazon/common/ami_config_test.go Normal file
@ -0,0 +1,249 @@
package common

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	"github.com/hashicorp/packer/packer-plugin-sdk/config"
)

func testAMIConfig() *AMIConfig {
	return &AMIConfig{
		AMIName: "foo",
	}
}

func getFakeAccessConfig(region string) *AccessConfig {
	c := testAccessConfig()
	c.RawRegion = region
	return c
}

func TestAMIConfigPrepare_name(t *testing.T) {
	c := testAMIConfig()
	accessConf := testAccessConfig()
	if err := c.Prepare(accessConf, nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AMIName = ""
	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("should have error")
	}
}

type mockEC2Client struct {
	ec2iface.EC2API
}

func (m *mockEC2Client) DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) {
	return &ec2.DescribeRegionsOutput{
		Regions: []*ec2.Region{
			{RegionName: aws.String("us-east-1")},
			{RegionName: aws.String("us-east-2")},
			{RegionName: aws.String("us-west-1")},
		},
	}, nil
}
func TestAMIConfigPrepare_regions(t *testing.T) {
	c := testAMIConfig()
	c.AMIRegions = nil

	var errs []error
	var err error
	accessConf := testAccessConfig()
	mockConn := &mockEC2Client{}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have err: %#v", errs)
	}

	c.AMIRegions, err = listEC2Regions(mockConn)
	if err != nil {
		t.Fatalf("shouldn't have err: %s", err.Error())
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have err: %#v", errs)
	}

	c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-1"}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("bad: %s", errs[0])
	}

	// Duplicates should have been removed.
	expected := []string{"us-east-1", "us-west-1"}
	if !reflect.DeepEqual(c.AMIRegions, expected) {
		t.Fatalf("bad: %#v", c.AMIRegions)
	}

	c.AMIRegions = []string{"custom"}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatal("shouldn't have error")
	}

	c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "789-012-3456",
		"us-east-2": "456-789-0123",
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have error: %s", errs[0])
	}

	c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "789-012-3456",
		"us-east-2": "",
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatal("should have passed; we are able to use default KMS key if not sharing")
	}

	// Sharing with a default KMS key is rejected by Prepare, not by
	// prepareRegions, so the region prep itself should still pass here.
	c.SnapshotUsers = []string{"user-foo", "user-bar"}
	c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "789-012-3456",
		"us-east-2": "",
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have error: %s", errs[0])
	}

	// prepareRegions only checks that every region in ami_regions has a
	// key; extra regions in the key map are caught by Prepare instead.
	c.AMIRegions = []string{"us-east-1", "us-west-1"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "789-012-3456",
		"us-east-2": "456-789-0123",
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have error: %s", errs[0])
	}

	c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "789-012-3456",
	}

	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("should have error b/c there's a region in ami_regions that isn't in the key map")
	}

	// An empty per-region key is tolerated by prepareRegions; the sharing
	// check on it happens in Prepare.
	c.SnapshotUsers = []string{"foo", "bar"}
	c.AMIKmsKeyId = "123-abc-456"
	c.AMIEncryptBootVolume = config.TriTrue
	c.AMIRegions = []string{"us-east-1", "us-west-1"}
	c.AMIRegionKMSKeyIDs = map[string]string{
		"us-east-1": "123-456-7890",
		"us-west-1": "",
	}
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatalf("shouldn't have error: %s", errs[0])
	}

	// Allow the raw session region to exist in the ami_regions list; it is
	// silently dropped rather than treated as an error.
	accessConf = getFakeAccessConfig("us-east-1")
	c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
	c.AMIRegionKMSKeyIDs = nil
	if errs = c.prepareRegions(accessConf); len(errs) > 0 {
		t.Fatal("should allow user to have the raw region in ami_regions")
	}
}
func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
	c := testAMIConfig()
	c.AMIUsers = []string{"testAccountID"}
	c.AMIEncryptBootVolume = config.TriTrue

	accessConf := testAccessConfig()

	c.AMIKmsKeyId = ""
	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("shouldn't be able to share ami with encrypted boot volume")
	}
	c.AMIKmsKeyId = "89c3fb9a-de87-4f2a-aedc-fddc5138193c"
	if err := c.Prepare(accessConf, nil); err != nil {
		t.Fatal("should be able to share ami with encrypted boot volume")
	}
}

func TestAMIConfigPrepare_ValidateKmsKey(t *testing.T) {
	c := testAMIConfig()
	c.AMIEncryptBootVolume = config.TriTrue

	accessConf := testAccessConfig()

	validCases := []string{
		"abcd1234-e567-890f-a12b-a123b4cd56ef",
		"alias/foo/bar",
		"arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
		"arn:aws:kms:us-east-1:012345678910:alias/foo/bar",
		"arn:aws-us-gov:kms:us-gov-east-1:123456789012:key/12345678-1234-abcd-0000-123456789012",
	}
	for _, validCase := range validCases {
		c.AMIKmsKeyId = validCase
		if err := c.Prepare(accessConf, nil); err != nil {
			t.Fatalf("%s should not have failed KMS key validation", validCase)
		}
	}

	invalidCases := []string{
		"ABCD1234-e567-890f-a12b-a123b4cd56ef",
		"ghij1234-e567-890f-a12b-a123b4cd56ef",
		"ghij1234+e567_890f-a12b-a123b4cd56ef",
		"foo/bar",
		"arn:aws:kms:us-east-1:012345678910:foo/bar",
		"arn:foo:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
	}
	for _, invalidCase := range invalidCases {
		c.AMIKmsKeyId = invalidCase
		if err := c.Prepare(accessConf, nil); err == nil {
			t.Fatalf("%s should have failed KMS key validation", invalidCase)
		}
	}
}

func TestAMINameValidation(t *testing.T) {
	c := testAMIConfig()

	accessConf := testAccessConfig()

	c.AMIName = "aa"
	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("shouldn't be able to have an ami name with less than 3 characters")
	}

	var longAmiName string
	for i := 0; i < 129; i++ {
		longAmiName += "a"
	}
	c.AMIName = longAmiName
	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("shouldn't be able to have an ami name with more than 128 characters")
	}

	c.AMIName = "+aaa"
	if err := c.Prepare(accessConf, nil); err == nil {
		t.Fatal("shouldn't be able to have an ami name with invalid characters")
	}

	c.AMIName = "fooBAR1()[] ./-'@_"
	if err := c.Prepare(accessConf, nil); err != nil {
		t.Fatal("should be able to use all of the allowed AMI characters")
	}

	c.AMIName = `xyz-base-2017-04-05-1934`
	if err := c.Prepare(accessConf, nil); err != nil {
		t.Fatalf("expected `xyz-base-2017-04-05-1934` to pass validation")
	}
}
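A note on the mocking approach in these tests: mockEC2Client embeds the ec2iface.EC2API interface and overrides only DescribeRegions, the standard aws-sdk-go v1 pattern for stubbing a single call; any unoverridden method falls through to the nil embedded interface and panics, making unexpected API usage loud in tests. A minimal sketch of the same pattern for a different call (DescribeImages here is an arbitrary illustration, not part of these tests):

package common

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// stubImagesClient embeds the EC2 interface and overrides a single call.
// Every other EC2API method hits the nil embedded interface and panics
// if reached, which flags unexpected API use during a test run.
type stubImagesClient struct {
	ec2iface.EC2API
}

func (s *stubImagesClient) DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) {
	return &ec2.DescribeImagesOutput{
		Images: []*ec2.Image{{ImageId: aws.String("ami-12345678")}},
	}, nil
}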