Merge remote-tracking branch 'origin/master' into pr/teddylear/10457
commit 4e22147909
@@ -7,7 +7,7 @@ version: 2.1
executors:
golang:
docker:
- image: docker.mirror.hashicorp.services/circleci/golang:1.15
- image: docker.mirror.hashicorp.services/circleci/golang:1.16
resource_class: medium+
darwin:
macos:

@@ -61,13 +61,11 @@ jobs:
file: coverage.txt
test-darwin:
executor: darwin
working_directory: ~/go/src/github.com/hashicorp/packer
environment:
GO111MODULE: "off"
working_directory: ~/go/github.com/hashicorp/packer
steps:
- install-go-run-tests-unix:
GOOS: darwin
GOVERSION: "1.15"
GOVERSION: "1.16"
- codecov/upload:
file: coverage.txt
test-windows:

@@ -76,7 +74,7 @@ jobs:
shell: bash.exe
steps:
- install-go-run-tests-windows:
GOVERSION: "1.15"
GOVERSION: "1.16"
- codecov/upload:
file: coverage.txt
check-lint:
@@ -19,7 +19,7 @@ can quickly merge or address your contributions.
already fixed the bug you're experiencing.

- Run the command with debug output with the environment variable `PACKER_LOG`.
For example: `PACKER_LOG=1 packer build template.json`. Take the _entire_
For example: `PACKER_LOG=1 packer build template.pkr.hcl`. Take the _entire_
output and create a [gist](https://gist.github.com) for linking to in your
issue. Packer should strip sensitive keys from the output, but take a look
through just in case.

@@ -64,7 +64,9 @@ can quickly merge or address your contributions.
If you have never worked with Go before, you will have to install its
runtime in order to build packer.

1. This project always releases from the latest version of golang. [Install go](https://golang.org/doc/install#install)
1. This project always releases from the latest version of golang.
[Install go](https://golang.org/doc/install#install) To properly build from
source, you need to have golang >= v1.16

## Setting up Packer for dev

@@ -72,7 +74,6 @@ If/when you have go installed you can already `go get` packer and `make` in
order to compile and test Packer. These instructions target
POSIX-like environments (macOS, Linux, Cygwin, etc.) so you may need to
adjust them for Windows or other shells.
The instructions below are for go 1.7. or later.

1. Download the Packer source (and its dependencies) by running
`go get github.com/hashicorp/packer`. This will download the Packer source to

@@ -91,7 +92,7 @@ The instructions below are for go 1.7. or later.
4. After running building Packer successfully, use
`$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and
verify your changes work. For instance:
`$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.
`$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.pkr.hcl`.

5. If everything works well and the tests pass, run `go fmt` on your code before
submitting a pull-request.
CHANGELOG.md
@@ -1,22 +1,63 @@
## 1.7.0 (Upcoming)
## 1.7.1 (Upcoming)

### IMPROVEMENTS
* builder/amazon: allow creation of ebs snapshots wihtout volumes. [GH-9591]
* builder/scaleway: add support for timeout in shutdown step. [GH-10503]
* builder/virtualbox: Add template options for chipset, firmware, nic, graphics
controller, and audio controller. [GH-10671]
* builder/virtualbox: Support for "virtio" storage and ISO drive. [GH-10632]
* builder/vmware: Added "attach_snapshot" parameter to vmware vmx builder.
[GH-10651]
* core: Change template parsing error to include warning about file extensions.
[GH-10652]
* hcl2_upgrade: hcl2_upgrade command can now upgrade json var-files [GH-10676]

### BUG FIXES
* builder/amazon: Update amazon SDK to fix an SSO login issue. [GH-10668]
* builder/azure: Don't overwrite subscription id if unset. [GH-10659]
* builder/oracle-oci: Update Oracle Go SDK to fix issue with reading key file.
[GH-10560]
* builder/vmware: Added a fallback file check when trying to determine the
network-mapping configuration. [GH-10543]
* core/hcl2_upgrade: Make hcl2_upgrade command correctly translate
pause_before. [GH-10654]
* core: Templates previously could not interpolate the environment variable
PACKER_LOG_PATH. [GH-10660]
* provisioner/chef-solo: HCL2 templates can support the json_string option.
[GH-10655]

## 1.7.0 (February 17, 2021)

### FEATURES
** New Command** (HCL only) `packer init` command will download plugins defined
in a new `required_plugins` block [GH-10304]
** New Plugin Type** Data sources can be implemented (blog post forthcoming).
* **New Command** (HCL only) `packer init` command will download plugins defined
in a new `required_plugins` block [GH-10304] [GH-10633].
* **New Plugin Type** Data sources can be implemented (blog post forthcoming).
[GH-10440]
** New Plugin** Aws Secrets Manager data source [GH-10505] [GH-10467]
* **New Plugin** Aws Secrets Manager data source [GH-10505] [GH-10467]

### BACKWARDS INCOMPATIBILITIES
* core: The API that the Packer core uses to communicate with community plugins
has changed; maintainers of community plugins will need to upgrade their
plugins in order to make them compatible with v1.7.0. An upgrade guide will
be available on our guides page https://www.packer.io/guides.

### IMPROVEMENTS
* builder/amazon: Add `skip_create_ami` option for testing and situations where
artifact is not the ami. [GH-10531]
* builder/amazon: Add IMDSv2 support for AWS EBS builder [GH-10546]
* builder/amazon: Add IMDSv2 support for AWS EBS builder. [GH-10546]
* builder/amazon: Add resource tags in the launch template used to request spot
instances. [GH-10456]
* builder/openstack: Add `skip_create_image` option for testing and situations
where artifact is not the image. [GH-10496]
* builder/oracle-oci: Add retry strategies to oci calls [GH-10591]
* core/fmt: The `packer fmt` can now read from stdin. [GH-10500]
* core/hcl: Templates now support "sensitive" locals [GH-10509]
* core/hcl: Add regex and regexall hcl2 template functions. [GH-10601]
* core/hcl: Templates now support "sensitive" locals. [GH-10509]
* core/hcl: Templates now support error-cleanup-provisioner. [GH-10604]
* hcl2_upgrade: Command now comes with a flag so you can control whether output
templates are annotated with helpful comments. [GH-10619]
* hcl2_upgrade: Command now gracefully handles options with template engine
interpolations. [GH-10625]
* hcl2_upgrade: Command will convert amazon filters to use the ami data source.
[GH-10491]

@@ -25,16 +66,19 @@
snapshot. [GH-10150]
* builder/amazon: Fix bug where validation fails if optional iops value is
unset. [GH-10518]
* builder/bsusurrogate: override bsu when omi root device is set [GH-10490]
* builder/amazon: Wrap API call to get filtered image in a retry. [GH-10610]
* builder/bsusurrogate: override bsu when omi root device is set. [GH-10490]
* builder/google: Fix bug where Packer would fail when run by users who do not
have permission to access the metadata, even though the metadata is not
necessary to the run. [GH-10458]
* builder/profitbricks: Profitbricks builder could not connect using SSH
communicator [GH-10549]
* builder/proxmox: Improve cloud init error logging for proxmox builder
communicator. [GH-10549]
* builder/proxmox: Ensure ISOs in additional_iso_files are mounted during VM
creation. [GH-10586]
* builder/proxmox: Improve cloud init error logging for proxmox builder.
[GH-10499]
* builder/qemu: Fix bug where vnc_min_port set to value greater then 5900 could
prevent Packer from connecting to QEMU. [GH-10450]
prevent Packer from connecting to QEMU. [GH-10450] [GH-10451]
* builder/qemu: Fix regression with cd indexing when disk_interface is `ide`.
[GH-10519]
* builder/vmware-esx: Skip credential validation, which requires ovftool to be

@@ -42,7 +86,10 @@
* builder/yandex: Fix cloud-init config for ubuntu 20.04. [GH-10522]
* builder/yandex: Fix incorrect access to `instance_id`. [GH-10522]
* core/hcl: Fix bug where []uint8 types could not be passed to plugins.
* core/hcl: Fix force flag for hcl2 provisioners and post-processors [GH-10571]
* core/hcl: fix bug where HCL core could not handle passing []uint8 to plugins.
[GH-10516]
* core/hcl: Fix force flag for hcl2 provisioners and post-processors.
[GH-10571]
* post-processor/vsphere: Fix regression where Packer would not check the exit
status after streaming UI from the ovftool command. [GH-10468]
* post-processor/yandex-export: Changed dhclient command and supported

@@ -58,7 +105,7 @@
[GH-10377]
* **New function** `env` allows users to set the default value of a variable to
the value of an environment variable. Please see [env function
docs](https://www.packer.io/docs/templates/hcl_templates/functions/contextual/env") for
docs](https://www.packer.io/docs/templates/hcl_templates/functions/contextual/env) for
more details. [GH-10240]
* **Future Scaffolding** This release contains a large number of no-op
refactoring changes. The Packer team at HashiCorp is preparing to split the
@@ -87,7 +87,6 @@

/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
/post-processor/checksum/ v.tolstov@selfip.ru
/post-processor/exoscale-import/ @falzm @mcorbin
/post-processor/googlecompute-export/ crunkleton@google.com
/post-processor/yandex-export/ @GennadySpb
/post-processor/yandex-import/ @GennadySpb
README.md
@@ -1,7 +1,7 @@
# Packer

[![Build Status][circleci-badge]][circleci]
[![Windows Build Status][appveyor-badge]][appveyor]
[![Discuss](https://img.shields.io/badge/discuss-packer-3d89ff?style=flat)](https://discuss.hashicorp.com/c/packer)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/hashicorp/packer)](https://pkg.go.dev/github.com/hashicorp/packer)
[![GoReportCard][report-badge]][report]
[![codecov](https://codecov.io/gh/hashicorp/packer/branch/master/graph/badge.svg)](https://codecov.io/gh/hashicorp/packer)

@@ -9,15 +9,16 @@
[circleci-badge]: https://circleci.com/gh/hashicorp/packer.svg?style=svg
[circleci]: https://app.circleci.com/pipelines/github/hashicorp/packer
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
[godoc-badge]: https://godoc.org/github.com/hashicorp/packer?status.svg
[godoc]: https://godoc.org/github.com/hashicorp/packer
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
[godoc]: https://godoc.org/github.com/hashicorp/packer
[report-badge]: https://goreportcard.com/badge/github.com/hashicorp/packer
[report]: https://goreportcard.com/report/github.com/hashicorp/packer

* Website: https://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](https://groups.google.com/forum/#!forum/packer-tool)
<p align="center" style="text-align:center;">
<a href="https://www.packer.io">
<img alt="HashiCorp Packer logo" src="website/public/img/logo-packer-padded.svg" width="500" />
</a>
</p>

Packer is a tool for building identical machine images for multiple platforms
from a single source configuration.

@@ -47,33 +48,43 @@ yourself](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.m

After Packer is installed, create your first template, which tells Packer
what platforms to build images for and how you want to build them. In our
case, we'll create a simple AMI that has Redis pre-installed. Save this
file as `quick-start.json`. Export your AWS credentials as the
case, we'll create a simple AMI that has Redis pre-installed.

Save this file as `quick-start.pkr.hcl`. Export your AWS credentials as the
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.

```json
{
"variables": {
"access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
"secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
},
"builders": [{
"type": "amazon-ebs",
"access_key": "{{user `access_key`}}",
"secret_key": "{{user `secret_key`}}",
"region": "us-east-1",
"source_ami": "ami-af22d9b9",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "packer-example {{timestamp}}"
}]
```hcl
variable "access_key" {
type = string
default = "${env("AWS_ACCESS_KEY_ID")}"
}

variable "secret_key" {
type = string
default = "${env("AWS_SECRET_ACCESS_KEY")}"
}

locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") }

source "amazon-ebs" "quick-start" {
access_key = "${var.access_key}"
ami_name = "packer-example ${local.timestamp}"
instance_type = "t2.micro"
region = "us-east-1"
secret_key = "${var.secret_key}"
source_ami = "ami-af22d9b9"
ssh_username = "ubuntu"
}

build {
sources = ["source.amazon-ebs.quick-start"]
}
```

Next, tell Packer to build the image:

```
$ packer build quick-start.json
$ packer build quick-start.pkr.hcl
...
```

@@ -85,11 +96,9 @@ they're run, etc., is up to you.

## Documentation

Comprehensive documentation is viewable on the Packer website:
Comprehensive documentation is viewable on the Packer website at https://www.packer.io/docs.

https://www.packer.io/docs

## Developing Packer
## Contributing to Packer

See
[CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
@@ -29,7 +29,7 @@ import (
// HCL config example:
//
// ```HCL
// source "example" "amazon-ebs"{
// source "amazon-ebs" "example" {
// assume_role {
// role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME"
// session_name = "SESSION_NAME"
@@ -174,6 +174,16 @@ type AccessConfig struct {
// credential types) and GetFederationToken (for federation\_token
// credential types) for more details.
//
// HCL2 example:
//
// ```hcl
// vault_aws_engine {
// name = "myrole"
// role_arn = "myarn"
// ttl = "3600s"
// }
// ```
//
// JSON example:
//
// ```json

@@ -185,16 +195,6 @@ type AccessConfig struct {
// }
// }
// ```
//
// HCL2 example:
//
// ```hcl
// vault_aws_engine {
// name = "myrole"
// role_arn = "myarn"
// ttl = "3600s"
// }
// ```
VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
// [Polling configuration](#polling-configuration) for the AWS waiter. Configures the waiter that checks
// resource state.
@@ -5,20 +5,10 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

func testAccessConfig() *AccessConfig {
return &AccessConfig{
getEC2Connection: func() ec2iface.EC2API {
return &mockEC2Client{}
},
PollingConfig: new(AWSPollingConfig),
}
}

func TestAccessConfigPrepare_Region(t *testing.T) {
c := testAccessConfig()
c := FakeAccessConfig()

c.RawRegion = "us-east-12"
err := c.ValidateRegion(c.RawRegion)

@@ -40,7 +30,7 @@ func TestAccessConfigPrepare_Region(t *testing.T) {
}

func TestAccessConfigPrepare_RegionRestricted(t *testing.T) {
c := testAccessConfig()
c := FakeAccessConfig()

// Create a Session with a custom region
c.session = session.Must(session.NewSession(&aws.Config{
@@ -135,26 +135,8 @@ type AMIConfig struct {
// the intermediary AMI into any regions provided in `ami_regions`, then
// delete the intermediary AMI. Default `false`.
AMISkipBuildRegion bool `mapstructure:"skip_save_build_region"`
// Key/value pair tags to apply to snapshot. They will override AMI tags if
// already applied to snapshot. This is a [template
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
// data](#build-template-data) for more information.
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false"`
// Same as [`snapshot_tags`](#snapshot_tags) but defined as a singular
// repeatable block containing a `key` and a `value` field. In HCL2 mode the
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
// will allow you to create those programatically.
SnapshotTag config.KeyValues `mapstructure:"snapshot_tag" required:"false"`
// A list of account IDs that have
// access to create volumes from the snapshot(s). By default no additional
// users other than the user creating the AMI has permissions to create
// volumes from the backing snapshot(s).
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
// A list of groups that have access to
// create volumes from the snapshot(s). By default no groups have permission
// to create volumes from the snapshot(s). all will make the snapshot
// publicly accessible.
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`

SnapshotConfig `mapstructure:",squash"`
}

func stringInSlice(s []string, searchstr string) bool {
@@ -7,7 +7,6 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/hashicorp/packer-plugin-sdk/template/config"
)

@@ -18,14 +17,14 @@ func testAMIConfig() *AMIConfig {
}

func getFakeAccessConfig(region string) *AccessConfig {
c := testAccessConfig()
c := FakeAccessConfig()
c.RawRegion = region
return c
}

func TestAMIConfigPrepare_name(t *testing.T) {
c := testAMIConfig()
accessConf := testAccessConfig()
accessConf := FakeAccessConfig()
if err := c.Prepare(accessConf, nil); err != nil {
t.Fatalf("shouldn't have err: %s", err)
}

@@ -36,10 +35,6 @@ func TestAMIConfigPrepare_name(t *testing.T) {
}
}

type mockEC2Client struct {
ec2iface.EC2API
}

func (m *mockEC2Client) DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) {
return &ec2.DescribeRegionsOutput{
Regions: []*ec2.Region{

@@ -56,7 +51,7 @@ func TestAMIConfigPrepare_regions(t *testing.T) {

var errs []error
var err error
accessConf := testAccessConfig()
accessConf := FakeAccessConfig()
mockConn := &mockEC2Client{}
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
t.Fatalf("shouldn't have err: %#v", errs)

@@ -163,7 +158,7 @@ func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
c.AMIUsers = []string{"testAccountID"}
c.AMIEncryptBootVolume = config.TriTrue

accessConf := testAccessConfig()
accessConf := FakeAccessConfig()

c.AMIKmsKeyId = ""
if err := c.Prepare(accessConf, nil); err == nil {

@@ -179,7 +174,7 @@ func TestAMIConfigPrepare_ValidateKmsKey(t *testing.T) {
c := testAMIConfig()
c.AMIEncryptBootVolume = config.TriTrue

accessConf := testAccessConfig()
accessConf := FakeAccessConfig()

validCases := []string{
"abcd1234-e567-890f-a12b-a123b4cd56ef",

@@ -215,7 +210,7 @@ func TestAMIConfigPrepare_ValidateKmsKey(t *testing.T) {
func TestAMINameValidation(t *testing.T) {
c := testAMIConfig()

accessConf := testAccessConfig()
accessConf := FakeAccessConfig()

c.AMIName = "aa"
if err := c.Prepare(accessConf, nil); err == nil {
@@ -51,7 +51,10 @@ func (d *AmiFilterOptions) GetFilteredImage(params *ec2.DescribeImagesInput, ec2
}

log.Printf("Using AMI Filters %v", params)
imageResp, err := ec2conn.DescribeImages(params)
req, imageResp := ec2conn.DescribeImagesRequest(params)
req.RetryCount = 11

err := req.Send()
if err != nil {
err := fmt.Errorf("Error querying AMI: %s", err)
return nil, err
@@ -30,18 +30,6 @@ const (
// The following mapping will tell Packer to encrypt the root volume of the
// build instance at launch using a specific non-default kms key:
//
// JSON example:
//
// ```json
// launch_block_device_mappings: [
// {
// "device_name": "/dev/sda1",
// "encrypted": true,
// "kms_key_id": "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
// }
// ]
// ```
//
// HCL2 example:
//
// ```hcl

@@ -52,6 +40,17 @@ const (
// }
// ```
//
// JSON example:
// ```json
// "launch_block_device_mappings": [
// {
// "device_name": "/dev/sda1",
// "encrypted": true,
// "kms_key_id": "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
// }
// ]
// ```
//
// Please note that the kms_key_id option in this example exists for
// launch_block_device_mappings but not ami_block_device_mappings.
//
@@ -132,8 +132,21 @@ type RunConfig struct {
// Whether or not to check if the IAM instance profile exists. Defaults to false
SkipProfileValidation bool `mapstructure:"skip_profile_validation" required:"false"`
// Temporary IAM instance profile policy document
// If IamInstanceProfile is specified it will be used instead. Example:
// If IamInstanceProfile is specified it will be used instead.
//
// HCL2 example:
// ```hcl
//temporary_iam_instance_profile_policy_document {
// Statement {
// Action = ["logs:*"]
// Effect = "Allow"
// Resource = "*"
// }
// Version = "2012-10-17"
//}
// ```
//
// JSON example:
// ```json
//{
// "Version": "2012-10-17",

@@ -157,17 +170,7 @@ type RunConfig struct {
// The EC2 instance type to use while building the
// AMI, such as t2.small.
InstanceType string `mapstructure:"instance_type" required:"true"`
// Filters used to populate the `security_group_ids` field. JSON Example:
//
// ```json
// {
// "security_group_filter": {
// "filters": {
// "tag:Class": "packer"
// }
// }
// }
// ```
// Filters used to populate the `security_group_ids` field.
//
// HCL2 Example:
//

@@ -179,6 +182,17 @@ type RunConfig struct {
// }
// ```
//
// JSON Example:
// ```json
// {
// "security_group_filter": {
// "filters": {
// "tag:Class": "packer"
// }
// }
// }
// ```
//
// This selects the SG's with tag `Class` with the value `packer`.
//
// - `filters` (map of strings) - filters used to select a

@@ -213,8 +227,24 @@ type RunConfig struct {
// AMI with a root volume snapshot that you have access to.
SourceAmi string `mapstructure:"source_ami" required:"true"`
// Filters used to populate the `source_ami`
// field. JSON Example:
// field.
//
// HCL2 example:
// ```hcl
// source "amazon-ebs" "basic-example" {
// source_ami_filter {
// filters = {
// virtualization-type = "hvm"
// name = "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*"
// root-device-type = "ebs"
// }
// owners = ["099720109477"]
// most_recent = true
// }
// }
// ```
//
// JSON Example:
// ```json
// "builders" [
// {

@@ -231,21 +261,6 @@ type RunConfig struct {
// }
// ]
// ```
// HCL2 example:
//
// ```hcl
// source "amazon-ebs" "basic-example" {
// source_ami_filter {
// filters = {
// virtualization-type = "hvm"
// name = "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*"
// root-device-type = "ebs"
// }
// owners = ["099720109477"]
// most_recent = true
// }
// }
// ```
//
// This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
// This will fail unless *exactly* one AMI is returned. In the above example,

@@ -313,8 +328,22 @@ type RunConfig struct {
// will allow you to create those programatically.
SpotTag config.KeyValues `mapstructure:"spot_tag" required:"false"`
// Filters used to populate the `subnet_id` field.
// JSON Example:
//
// HCL2 example:
//
// ```hcl
// source "amazon-ebs" "basic-example" {
// subnet_filter {
// filters = {
// "tag:Class": "build"
// }
// most_free = true
// random = false
// }
// }
// ```
//
// JSON Example:
// ```json
// "builders" [
// {

@@ -329,19 +358,6 @@ type RunConfig struct {
// }
// ]
// ```
// HCL2 example:
//
// ```hcl
// source "amazon-ebs" "basic-example" {
// subnet_filter {
// filters = {
// "tag:Class": "build"
// }
// most_free = true
// random = false
// }
// }
// ```
//
// This selects the Subnet with tag `Class` with the value `build`, which has
// the most free IP addresses. NOTE: This will fail unless *exactly* one

@@ -388,8 +404,21 @@ type RunConfig struct {
// data when launching the instance.
UserDataFile string `mapstructure:"user_data_file" required:"false"`
// Filters used to populate the `vpc_id` field.
// JSON Example:
//
// HCL2 example:
// ```hcl
// source "amazon-ebs" "basic-example" {
// vpc_filter {
// filters = {
// "tag:Class": "build",
// "isDefault": "false",
// "cidr": "/24"
// }
// }
// }
// ```
//
// JSON Example:
// ```json
// "builders" [
// {

@@ -404,19 +433,6 @@ type RunConfig struct {
// }
// ]
// ```
// HCL2 example:
//
// ```hcl
// source "amazon-ebs" "basic-example" {
// vpc_filter {
// filters = {
// "tag:Class": "build",
// "isDefault": "false",
// "cidr": "/24"
// }
// }
// }
// ```
//
// This selects the VPC with tag `Class` with the value `build`, which is not
// the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
@@ -0,0 +1,29 @@
//go:generate struct-markdown

package common

import "github.com/hashicorp/packer-plugin-sdk/template/config"

// SnapshotConfig is for common configuration related to creating AMIs.
type SnapshotConfig struct {
// Key/value pair tags to apply to snapshot. They will override AMI tags if
// already applied to snapshot. This is a [template
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
// data](#build-template-data) for more information.
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false"`
// Same as [`snapshot_tags`](#snapshot_tags) but defined as a singular
// repeatable block containing a `key` and a `value` field. In HCL2 mode the
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
// will allow you to create those programatically.
SnapshotTag config.KeyValues `mapstructure:"snapshot_tag" required:"false"`
// A list of account IDs that have
// access to create volumes from the snapshot(s). By default no additional
// users other than the user creating the AMI has permissions to create
// volumes from the backing snapshot(s).
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
// A list of groups that have access to
// create volumes from the snapshot(s). By default no groups have permission
// to create volumes from the snapshot(s). all will make the snapshot
// publicly accessible.
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
}
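For context, this extracted struct is consumed by embedding it with a ",squash" mapstructure tag so the snapshot_* keys stay flat in the template, which is how AMIConfig and the ebsvolume BlockDevice pick it up elsewhere in this diff. A minimal sketch, assuming it sits alongside SnapshotConfig in the same common package (the struct name here is illustrative, not from the PR):

```go
package common

// exampleDeviceConfig is a hypothetical consumer of SnapshotConfig. Embedding
// with `mapstructure:",squash"` means snapshot_tags, snapshot_users, etc. are
// decoded at the same level as device_name rather than under a nested key.
type exampleDeviceConfig struct {
	DeviceName string `mapstructure:"device_name"`

	SnapshotConfig `mapstructure:",squash"`
}
```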
@@ -42,17 +42,8 @@ type StateChangeConf struct {

// Polling configuration for the AWS waiter. Configures the waiter for resources creation or actions like attaching
// volumes or importing image.
// Usage example:
//
// In JSON:
// ```json
// "aws_polling" : {
// "delay_seconds": 30,
// "max_attempts": 50
// }
// ```
//
// In HCL2:
// HCL2 example:
// ```hcl
// aws_polling {
// delay_seconds = 30

@@ -60,6 +51,13 @@ type StateChangeConf struct {
// }
// ```
//
// JSON example:
// ```json
// "aws_polling" : {
// "delay_seconds": 30,
// "max_attempts": 50
// }
// ```
type AWSPollingConfig struct {
// Specifies the maximum number of attempts the waiter will check for resource state.
// This value can also be set via the AWS_MAX_ATTEMPTS.

@@ -151,15 +149,22 @@ func (w *AWSPollingConfig) WaitUntilVolumeAvailable(ctx aws.Context, conn *ec2.E
return err
}

func (w *AWSPollingConfig) WaitUntilSnapshotDone(ctx aws.Context, conn *ec2.EC2, snapshotID string) error {
func (w *AWSPollingConfig) WaitUntilSnapshotDone(ctx aws.Context, conn ec2iface.EC2API, snapshotID string) error {
snapInput := ec2.DescribeSnapshotsInput{
SnapshotIds: []*string{&snapshotID},
}

waitOpts := w.getWaiterOptions()
if len(waitOpts) == 0 {
// Bump this default to 30 minutes.
// Large snapshots can take a long time for the copy to s3
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(120))
}

err := conn.WaitUntilSnapshotCompletedWithContext(
ctx,
&snapInput,
w.getWaiterOptions()...)
waitOpts...)
return err
}

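The signature change above (from *ec2.EC2 to the ec2iface.EC2API interface) is what lets unit tests stub the waiter. A minimal sketch of such a stub, mirroring the mockEC2Conn added in the ebsvolume tests later in this diff (the type name is illustrative, not from the PR):

```go
package common

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// stubEC2 embeds the interface so only the methods a test cares about need
// to be overridden; everything else panics if called unexpectedly.
type stubEC2 struct {
	ec2iface.EC2API
}

// Report the snapshot as complete immediately instead of polling EC2.
func (stubEC2) WaitUntilSnapshotCompletedWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.WaiterOption) error {
	return nil
}
```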
@@ -105,7 +105,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {
// ------------------------------------------------------------------------

stepAMIRegionCopy := StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-east-1"},
AMIKmsKeyId: "12345",
// Original region key in regionkeyids is different than in amikmskeyid

@@ -131,7 +131,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {

// the ami is only copied once.
stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-east-1"},
Name: "fake-ami-name",
OriginalRegion: "us-east-1",

@@ -152,7 +152,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {

// the ami is only copied once.
stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-east-1"},
EncryptBootVolume: config.TriFalse,
Name: "fake-ami-name",

@@ -174,7 +174,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {
// ------------------------------------------------------------------------

stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
// Many duplicates for only 3 actual values
Regions: []string{"us-east-1", "us-west-2", "us-west-2", "ap-east-1", "ap-east-1", "ap-east-1"},
AMIKmsKeyId: "IlikePancakes",

@@ -203,7 +203,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {
// ------------------------------------------------------------------------

stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
// Many duplicates for only 3 actual values
Regions: []string{"us-east-1", "us-west-2", "us-west-2", "ap-east-1", "ap-east-1", "ap-east-1"},
Name: "fake-ami-name",

@@ -223,7 +223,7 @@ func TestStepAMIRegionCopy_duplicates(t *testing.T) {
func TestStepAmiRegionCopy_nil_encryption(t *testing.T) {
// create step
stepAMIRegionCopy := StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: make([]string, 0),
AMIKmsKeyId: "",
RegionKeyIds: make(map[string]string),

@@ -249,7 +249,7 @@ func TestStepAmiRegionCopy_nil_encryption(t *testing.T) {
func TestStepAmiRegionCopy_true_encryption(t *testing.T) {
// create step
stepAMIRegionCopy := StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: make([]string, 0),
AMIKmsKeyId: "",
RegionKeyIds: make(map[string]string),

@@ -275,7 +275,7 @@ func TestStepAmiRegionCopy_true_encryption(t *testing.T) {
func TestStepAmiRegionCopy_nil_intermediary(t *testing.T) {
// create step
stepAMIRegionCopy := StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: make([]string, 0),
AMIKmsKeyId: "",
RegionKeyIds: make(map[string]string),

@@ -303,7 +303,7 @@ func TestStepAmiRegionCopy_AMISkipBuildRegion(t *testing.T) {
// ------------------------------------------------------------------------

stepAMIRegionCopy := StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-west-1"},
AMIKmsKeyId: "",
RegionKeyIds: map[string]string{"us-west-1": "abcde"},

@@ -329,7 +329,7 @@ func TestStepAmiRegionCopy_AMISkipBuildRegion(t *testing.T) {
// skip build region is false.
// ------------------------------------------------------------------------
stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-west-1"},
AMIKmsKeyId: "",
RegionKeyIds: make(map[string]string),

@@ -354,7 +354,7 @@ func TestStepAmiRegionCopy_AMISkipBuildRegion(t *testing.T) {
// skip build region is false, but encrypt is true
// ------------------------------------------------------------------------
stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-west-1"},
AMIKmsKeyId: "",
RegionKeyIds: map[string]string{"us-west-1": "abcde"},

@@ -380,7 +380,7 @@ func TestStepAmiRegionCopy_AMISkipBuildRegion(t *testing.T) {
// skip build region is true, and encrypt is true
// ------------------------------------------------------------------------
stepAMIRegionCopy = StepAMIRegionCopy{
AccessConfig: testAccessConfig(),
AccessConfig: FakeAccessConfig(),
Regions: []string{"us-west-1"},
AMIKmsKeyId: "",
RegionKeyIds: map[string]string{"us-west-1": "abcde"},
@@ -258,7 +258,7 @@ func (s *StepRunSourceInstance) Run(ctx context.Context, state multistep.StateBa
if resp, e := ec2conn.DescribeInstances(describeInstance); e == nil {
if len(resp.Reservations) > 0 && len(resp.Reservations[0].Instances) > 0 {
instance := resp.Reservations[0].Instances[0]
if instance.StateTransitionReason != nil && instance.StateReason.Message != nil {
if instance.StateTransitionReason != nil && instance.StateReason != nil && instance.StateReason.Message != nil {
ui.Error(fmt.Sprintf("Instance state change details: %s: %s",
*instance.StateTransitionReason, *instance.StateReason.Message))
}
@@ -54,6 +54,24 @@ type StepRunSpotInstance struct {
instanceId string
}

// The EbsBlockDevice and LaunchTemplateEbsBlockDeviceRequest structs are
// nearly identical except for the struct's name and one extra field in
// EbsBlockDeviceResuest, which unfortunately means you can't just cast one
// into the other. THANKS AMAZON.
func castBlockDeviceToRequest(bd *ec2.EbsBlockDevice) *ec2.LaunchTemplateEbsBlockDeviceRequest {
out := &ec2.LaunchTemplateEbsBlockDeviceRequest{
DeleteOnTermination: bd.DeleteOnTermination,
Encrypted: bd.Encrypted,
Iops: bd.Iops,
KmsKeyId: bd.KmsKeyId,
SnapshotId: bd.SnapshotId,
Throughput: bd.Throughput,
VolumeSize: bd.VolumeSize,
VolumeType: bd.VolumeType,
}
return out
}

func (s *StepRunSpotInstance) CreateTemplateData(userData *string, az string,
state multistep.StateBag, marketOptions *ec2.LaunchTemplateInstanceMarketOptionsRequest) *ec2.RequestLaunchTemplateData {
blockDeviceMappings := s.LaunchMappings.BuildEC2BlockDeviceMappings()

@@ -61,15 +79,12 @@ func (s *StepRunSpotInstance) CreateTemplateData(userData *string, az string,
// LaunchTemplateBlockDeviceMappingRequest. These structs are identical,
// except for the EBS field -- on one, that field contains a
// LaunchTemplateEbsBlockDeviceRequest, and on the other, it contains an
// EbsBlockDevice. The EbsBlockDevice and
// LaunchTemplateEbsBlockDeviceRequest structs are themselves
// identical except for the struct's name, so you can cast one directly
// into the other.
// EbsBlockDevice.
var launchMappingRequests []*ec2.LaunchTemplateBlockDeviceMappingRequest
for _, mapping := range blockDeviceMappings {
launchRequest := &ec2.LaunchTemplateBlockDeviceMappingRequest{
DeviceName: mapping.DeviceName,
Ebs: (*ec2.LaunchTemplateEbsBlockDeviceRequest)(mapping.Ebs),
Ebs: castBlockDeviceToRequest(mapping.Ebs),
VirtualName: mapping.VirtualName,
}
launchMappingRequests = append(launchMappingRequests, launchRequest)
@@ -0,0 +1,25 @@
package common

import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

type mockEC2Client struct {
ec2iface.EC2API
}

func FakeAccessConfig() *AccessConfig {
accessConfig := AccessConfig{
getEC2Connection: func() ec2iface.EC2API {
return &mockEC2Client{}
},
PollingConfig: new(AWSPollingConfig),
}
accessConfig.session = session.Must(session.NewSession(&aws.Config{
Region: aws.String("us-west-1"),
}))

return &accessConfig
}
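A minimal sketch (hypothetical, not in this diff) of the intended call pattern: tests that used to build their own testAccessConfig() now grab this shared fake, as the updated call sites throughout the diff do. The test name below is illustrative.

```go
package common

import "testing"

// The fake comes pre-wired with a mocked EC2 connection, a polling config,
// and a us-west-1 session, so tests need no real AWS credentials.
func TestExampleUsingFakeAccessConfig(t *testing.T) {
	c := FakeAccessConfig()
	if c.PollingConfig == nil || c.session == nil {
		t.Fatal("expected the fake access config to include a polling config and session")
	}
}
```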
@@ -13,11 +13,15 @@ import (
// map of region to list of volume IDs
type EbsVolumes map[string][]string

// map of region to list of snapshot IDs
type EbsSnapshots map[string][]string

// Artifact is an artifact implementation that contains built AMIs.
type Artifact struct {
// A map of regions to EBS Volume IDs.
Volumes EbsVolumes

// A map of regions to EBS Snapshot IDs.
Snapshots EbsSnapshots
// BuilderId is the unique ID for the builder that created this AMI
BuilderIdValue string

@@ -40,13 +44,21 @@ func (*Artifact) Files() []string {

// returns a sorted list of region:ID pairs
func (a *Artifact) idList() []string {
parts := make([]string, 0, len(a.Volumes))

parts := make([]string, 0, len(a.Volumes)+len(a.Snapshots))

for region, volumeIDs := range a.Volumes {
for _, volumeID := range volumeIDs {
parts = append(parts, fmt.Sprintf("%s:%s", region, volumeID))
}
}

for region, snapshotIDs := range a.Snapshots {
for _, snapshotID := range snapshotIDs {
parts = append(parts, fmt.Sprintf("%s:%s", region, snapshotID))
}
}

sort.Strings(parts)
return parts
}
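A minimal sketch (hypothetical test, not part of this diff) of what the extended idList now yields, assuming the Artifact, EbsVolumes, and EbsSnapshots types shown above:

```go
package ebsvolume

import "testing"

// With the Snapshots map populated, the artifact ID lists both volume and
// snapshot IDs as sorted region:ID pairs.
func TestArtifactIdListIncludesSnapshots(t *testing.T) {
	a := &Artifact{
		Volumes:   EbsVolumes{"us-east-1": {"vol-1234"}},
		Snapshots: EbsSnapshots{"us-east-1": {"snap-5678"}},
	}
	got := a.idList()
	want := []string{"us-east-1:snap-5678", "us-east-1:vol-1234"}
	if len(got) != len(want) {
		t.Fatalf("unexpected id list: %v", got)
	}
	for i := range want {
		if got[i] != want[i] {
			t.Fatalf("unexpected id list: %v", got)
		}
	}
}
```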
@@ -20,6 +20,11 @@ type BlockDevice struct {
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
// will allow you to create those programatically.
Tag config.KeyValues `mapstructure:"tag" required:"false"`

// Create a Snapshot of this Volume.
SnapshotVolume bool `mapstructure:"snapshot_volume" required:"false"`

awscommon.SnapshotConfig `mapstructure:",squash"`
}

type BlockDevices []BlockDevice

@@ -38,6 +43,7 @@ func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
for _, block := range bds {

errs = append(errs, block.Tag.CopyOn(&block.Tags)...)
errs = append(errs, block.SnapshotTag.CopyOn(&block.SnapshotTags)...)

if err := block.Prepare(ctx); err != nil {
errs = append(errs, err)
@@ -123,16 +123,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
// Accumulate any errors
var errs *packersdk.MultiError
var warns []string

errs = packersdk.MultiErrorAppend(errs, b.config.VolumeRunTag.CopyOn(&b.config.VolumeRunTags)...)
errs = packersdk.MultiErrorAppend(errs, b.config.AccessConfig.Prepare()...)
errs = packersdk.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
errs = packersdk.MultiErrorAppend(errs, b.config.launchBlockDevices.Prepare(&b.config.ctx)...)

for _, d := range b.config.VolumeMappings {
if err := d.Prepare(&b.config.ctx); err != nil {
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("AMIMapping: %s", err.Error()))
}
}
errs = packersdk.MultiErrorAppend(errs, b.config.VolumeMappings.Prepare(&b.config.ctx)...)

b.config.launchBlockDevices = b.config.VolumeMappings
if err != nil {

@@ -318,6 +314,12 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
EnableAMIENASupport: b.config.AMIENASupport,
},
&stepSnapshotEBSVolumes{
PollingConfig: b.config.PollingConfig,
VolumeMapping: b.config.VolumeMappings,
AccessConfig: &b.config.AccessConfig,
Ctx: b.config.ctx,
},
}

// Run!

@@ -332,6 +334,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
// Build the artifact and return it
artifact := &Artifact{
Volumes: state.Get("ebsvolumes").(EbsVolumes),
Snapshots: state.Get("ebssnapshots").(EbsSnapshots),
BuilderIdValue: BuilderId,
Conn: ec2conn,
StateData: map[string]interface{}{"generated_data": state.Get("generated_data")},
@@ -25,6 +25,11 @@ type FlatBlockDevice struct {
KmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
Tags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
Tag []config.FlatKeyValue `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
SnapshotVolume *bool `mapstructure:"snapshot_volume" required:"false" cty:"snapshot_volume" hcl:"snapshot_volume"`
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false" cty:"snapshot_tags" hcl:"snapshot_tags"`
SnapshotTag []config.FlatKeyValue `mapstructure:"snapshot_tag" required:"false" cty:"snapshot_tag" hcl:"snapshot_tag"`
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false" cty:"snapshot_users" hcl:"snapshot_users"`
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false" cty:"snapshot_groups" hcl:"snapshot_groups"`
}

// FlatMapstructure returns a new FlatBlockDevice.

@@ -52,6 +57,11 @@ func (*FlatBlockDevice) HCL2Spec() map[string]hcldec.Spec {
"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
"tag": &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
"snapshot_volume": &hcldec.AttrSpec{Name: "snapshot_volume", Type: cty.Bool, Required: false},
"snapshot_tags": &hcldec.AttrSpec{Name: "snapshot_tags", Type: cty.Map(cty.String), Required: false},
"snapshot_tag": &hcldec.BlockListSpec{TypeName: "snapshot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
"snapshot_users": &hcldec.AttrSpec{Name: "snapshot_users", Type: cty.List(cty.String), Required: false},
"snapshot_groups": &hcldec.AttrSpec{Name: "snapshot_groups", Type: cty.List(cty.String), Required: false},
}
return s
}
@@ -104,9 +104,6 @@ func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
if len(generatedData) == 0 {
t.Fatalf("Generated data should not be empty")
}
if len(generatedData) == 0 {
t.Fatalf("Generated data should not be empty")
}
if generatedData[0] != "SourceAMIName" {
t.Fatalf("Generated data should contain SourceAMIName")
}

@@ -126,3 +123,44 @@ func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
t.Fatalf("Generated data should contain SourceAMIOwnerName")
}
}

func TestBuidler_ConfigBlockdevicemapping(t *testing.T) {
var b Builder
config := testConfig()

//Set some snapshot settings
config["ebs_volumes"] = []map[string]interface{}{
{
"device_name": "/dev/xvdb",
"volume_size": "32",
"delete_on_termination": true,
},
{
"device_name": "/dev/xvdc",
"volume_size": "32",
"delete_on_termination": true,
"snapshot_tags": map[string]string{
"Test_Tag": "tag_value",
"another tag": "another value",
},
"snapshot_users": []string{
"123", "456",
},
},
}

generatedData, warnings, err := b.Prepare(config)

if len(warnings) > 0 {
t.Fatalf("bad: %#v", warnings)
}
if err != nil {
t.Fatalf("should not have error: %s", err)
}
if len(generatedData) == 0 {
t.Fatalf("Generated data should not be empty")
}

t.Logf("Test gen %+v", b.config.VolumeMappings)

}
@@ -0,0 +1,167 @@
package ebsvolume

import (
"context"
"fmt"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/hashicorp/packer-plugin-sdk/multistep"
"github.com/hashicorp/packer-plugin-sdk/packer"
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

type stepSnapshotEBSVolumes struct {
PollingConfig *awscommon.AWSPollingConfig
AccessConfig *awscommon.AccessConfig
VolumeMapping []BlockDevice
//Map of SnapshotID: BlockDevice, Where *BlockDevice is in VolumeMapping
snapshotMap map[string]*BlockDevice
Ctx interpolate.Context
}

func (s *stepSnapshotEBSVolumes) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(ec2iface.EC2API)
instance := state.Get("instance").(*ec2.Instance)
ui := state.Get("ui").(packer.Ui)

s.snapshotMap = make(map[string]*BlockDevice)

for _, instanceBlockDevice := range instance.BlockDeviceMappings {
for _, configVolumeMapping := range s.VolumeMapping {
//Find the config entry for the instance blockDevice
if configVolumeMapping.DeviceName == *instanceBlockDevice.DeviceName {
//Skip Volumes that are not set to create snapshot
if configVolumeMapping.SnapshotVolume != true {
continue
}

ui.Message(fmt.Sprintf("Compiling list of tags to apply to snapshot from Volume %s...", *instanceBlockDevice.DeviceName))
tags, err := awscommon.TagMap(configVolumeMapping.SnapshotTags).EC2Tags(s.Ctx, s.AccessConfig.SessionRegion(), state)
if err != nil {
err := fmt.Errorf("Error generating tags for snapshot %s: %s", *instanceBlockDevice.DeviceName, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
tags.Report(ui)

tagSpec := &ec2.TagSpecification{
ResourceType: aws.String("snapshot"),
Tags: tags,
}

input := &ec2.CreateSnapshotInput{
VolumeId: aws.String(*instanceBlockDevice.Ebs.VolumeId),
TagSpecifications: []*ec2.TagSpecification{tagSpec},
}

//Dont try to set an empty tag spec
if len(tags) == 0 {
input.TagSpecifications = nil
}

ui.Message(fmt.Sprintf("Requesting snapshot of volume: %s...", *instanceBlockDevice.Ebs.VolumeId))
snapshot, err := ec2conn.CreateSnapshot(input)
if err != nil || snapshot == nil {
err := fmt.Errorf("Error generating snapsot for volume %s: %s", *instanceBlockDevice.Ebs.VolumeId, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
ui.Message(fmt.Sprintf("Requested Snapshot of Volume %s: %s", *instanceBlockDevice.Ebs.VolumeId, *snapshot.SnapshotId))
s.snapshotMap[*snapshot.SnapshotId] = &configVolumeMapping
}
}
}

ui.Say("Waiting for Snapshots to become ready...")
for snapID := range s.snapshotMap {
ui.Message(fmt.Sprintf("Waiting for %s to be ready.", snapID))
err := s.PollingConfig.WaitUntilSnapshotDone(ctx, ec2conn, snapID)
if err != nil {
err = fmt.Errorf("Error waiting for snapsot to become ready %s", err)
state.Put("error", err)
ui.Error(err.Error())
ui.Message("Failed to wait")
return multistep.ActionHalt
}
ui.Message(fmt.Sprintf("Snapshot Ready: %s", snapID))
}

//Attach User and Group permissions to snapshots
ui.Say("Setting User/Group Permissions for Snapshots...")
for snapID, bd := range s.snapshotMap {
snapshotOptions := make(map[string]*ec2.ModifySnapshotAttributeInput)

if len(bd.SnapshotGroups) > 0 {
groups := make([]*string, len(bd.SnapshotGroups))
addsSnapshot := make([]*ec2.CreateVolumePermission, len(bd.SnapshotGroups))

addSnapshotGroups := &ec2.ModifySnapshotAttributeInput{
CreateVolumePermission: &ec2.CreateVolumePermissionModifications{},
}

for i, g := range bd.SnapshotGroups {
groups[i] = aws.String(g)
addsSnapshot[i] = &ec2.CreateVolumePermission{
Group: aws.String(g),
}
}

addSnapshotGroups.GroupNames = groups
addSnapshotGroups.CreateVolumePermission.Add = addsSnapshot
snapshotOptions["groups"] = addSnapshotGroups

}

if len(bd.SnapshotUsers) > 0 {
users := make([]*string, len(bd.SnapshotUsers))
addsSnapshot := make([]*ec2.CreateVolumePermission, len(bd.SnapshotUsers))
for i, u := range bd.SnapshotUsers {
users[i] = aws.String(u)
addsSnapshot[i] = &ec2.CreateVolumePermission{UserId: aws.String(u)}
}

snapshotOptions["users"] = &ec2.ModifySnapshotAttributeInput{
UserIds: users,
CreateVolumePermission: &ec2.CreateVolumePermissionModifications{
Add: addsSnapshot,
},
}
}

//Todo: Copy to other regions and repeat this block in all regions.
for name, input := range snapshotOptions {
ui.Message(fmt.Sprintf("Modifying: %s", name))
input.SnapshotId = &snapID
_, err := ec2conn.ModifySnapshotAttribute(input)
if err != nil {
err := fmt.Errorf("Error modify snapshot attributes: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
}

//Record all snapshots in current Region.
snapshots := make(EbsSnapshots)
currentregion := s.AccessConfig.SessionRegion()

for snapID := range s.snapshotMap {
snapshots[currentregion] = append(
snapshots[currentregion],
snapID)
}
//Records artifacts
state.Put("ebssnapshots", snapshots)

return multistep.ActionContinue
}

func (s *stepSnapshotEBSVolumes) Cleanup(state multistep.StateBag) {
// No cleanup...
}
@ -0,0 +1,170 @@
|
|||
package ebsvolume

import (
	"bytes"
	"context"
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"

	//"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	"github.com/hashicorp/packer-plugin-sdk/multistep"
	"github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer/builder/amazon/common"
)

// Define a mock struct to be used in unit tests for common aws steps.
type mockEC2Conn struct {
	ec2iface.EC2API
	Config *aws.Config
}

func (m *mockEC2Conn) CreateSnapshot(input *ec2.CreateSnapshotInput) (*ec2.Snapshot, error) {
	snap := &ec2.Snapshot{
		// This isn't typical amazon format, but injecting the volume id into
		// this field lets us verify that the right volume was snapshotted with
		// a simple string comparison
		SnapshotId: aws.String(fmt.Sprintf("snap-of-%s", *input.VolumeId)),
	}

	return snap, nil
}

func (m *mockEC2Conn) WaitUntilSnapshotCompletedWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.WaiterOption) error {
	return nil
}

func getMockConn(config *common.AccessConfig, target string) (ec2iface.EC2API, error) {
	mockConn := &mockEC2Conn{
		Config: aws.NewConfig(),
	}
	return mockConn, nil
}

// Create statebag for running test
func tState(t *testing.T) multistep.StateBag {
	state := new(multistep.BasicStateBag)
	state.Put("ui", &packer.BasicUi{
		Reader: new(bytes.Buffer),
		Writer: new(bytes.Buffer),
	})
	// state.Put("amis", map[string]string{"us-east-1": "ami-12345"})
	// state.Put("snapshots", map[string][]string{"us-east-1": {"snap-0012345"}})
	conn, _ := getMockConn(&common.AccessConfig{}, "us-east-2")

	state.Put("ec2", conn)
	// Store a fake instance that contains a block device that matches the
	// volumes defined in the config above
	state.Put("instance", &ec2.Instance{
		InstanceId: aws.String("instance-id"),
		BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
			{
				DeviceName: aws.String("/dev/xvda"),
				Ebs: &ec2.EbsInstanceBlockDevice{
					VolumeId: aws.String("vol-1234"),
				},
			},
			{
				DeviceName: aws.String("/dev/xvdb"),
				Ebs: &ec2.EbsInstanceBlockDevice{
					VolumeId: aws.String("vol-5678"),
				},
			},
		},
	})
	return state
}

func TestStepSnapshot_run_simple(t *testing.T) {
	var b Builder
	config := testConfig() //from builder_test

	//Set some snapshot settings
	config["ebs_volumes"] = []map[string]interface{}{
		{
			"device_name":           "/dev/xvdb",
			"volume_size":           "32",
			"delete_on_termination": true,
			"snapshot_volume":       true,
		},
	}

	generatedData, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
	if len(generatedData) == 0 {
		t.Fatalf("Generated data should not be empty")
	}

	state := tState(t)

	accessConfig := common.FakeAccessConfig()

	step := stepSnapshotEBSVolumes{
		PollingConfig: new(common.AWSPollingConfig),
		AccessConfig:  accessConfig,
		VolumeMapping: b.config.VolumeMappings,
		Ctx:           b.config.ctx,
	}

	step.Run(context.Background(), state)

	if len(step.snapshotMap) != 1 {
		t.Fatalf("Missing Snapshot from step")
	}

	if volmapping := step.snapshotMap["snap-of-vol-5678"]; volmapping == nil {
		t.Fatalf("Didn't snapshot correct volume: Map is %#v", step.snapshotMap)
	}
}

func TestStepSnapshot_run_no_snaps(t *testing.T) {
	var b Builder
	config := testConfig() //from builder_test

	//Set some snapshot settings
	config["ebs_volumes"] = []map[string]interface{}{
		{
			"device_name":           "/dev/xvdb",
			"volume_size":           "32",
			"delete_on_termination": true,
			"snapshot_volume":       false,
		},
	}

	generatedData, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
	if len(generatedData) == 0 {
		t.Fatalf("Generated data should not be empty")
	}

	state := tState(t)

	accessConfig := common.FakeAccessConfig()

	step := stepSnapshotEBSVolumes{
		PollingConfig: new(common.AWSPollingConfig),
		AccessConfig:  accessConfig,
		VolumeMapping: b.config.VolumeMappings,
		Ctx:           b.config.ctx,
	}

	step.Run(context.Background(), state)

	if len(step.snapshotMap) != 0 {
		t.Fatalf("Shouldn't have snapshotted any volumes")
	}
}
@ -233,7 +233,9 @@ func NewAzureClient(subscriptionID, sigSubscriptionID, resourceGroupName, storag
|
|||
azureClient.GalleryImageVersionsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient))
|
||||
azureClient.GalleryImageVersionsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(version.AzurePluginVersion.FormattedVersion()), azureClient.GalleryImageVersionsClient.UserAgent)
|
||||
azureClient.GalleryImageVersionsClient.Client.PollingDuration = sharedGalleryTimeout
|
||||
azureClient.GalleryImageVersionsClient.SubscriptionID = sigSubscriptionID
|
||||
if sigSubscriptionID != "" {
|
||||
azureClient.GalleryImageVersionsClient.SubscriptionID = sigSubscriptionID
|
||||
}
|
||||
|
||||
azureClient.GalleryImagesClient = newCompute.NewGalleryImagesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID)
|
||||
azureClient.GalleryImagesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
|
@ -241,7 +243,9 @@ func NewAzureClient(subscriptionID, sigSubscriptionID, resourceGroupName, storag
|
|||
azureClient.GalleryImagesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient))
|
||||
azureClient.GalleryImagesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(version.AzurePluginVersion.FormattedVersion()), azureClient.GalleryImagesClient.UserAgent)
|
||||
azureClient.GalleryImagesClient.Client.PollingDuration = pollingDuration
|
||||
azureClient.GalleryImagesClient.SubscriptionID = sigSubscriptionID
|
||||
if sigSubscriptionID != "" {
|
||||
azureClient.GalleryImagesClient.SubscriptionID = sigSubscriptionID
|
||||
}
|
||||
|
||||
keyVaultURL, err := url.Parse(cloud.KeyVaultEndpoint)
|
||||
if err != nil {
|
||||
|
|
|
@ -23,6 +23,7 @@ type FlatConfig struct {
|
|||
ClientID *string `mapstructure:"client_id" cty:"client_id" hcl:"client_id"`
|
||||
ClientSecret *string `mapstructure:"client_secret" cty:"client_secret" hcl:"client_secret"`
|
||||
ClientCertPath *string `mapstructure:"client_cert_path" cty:"client_cert_path" hcl:"client_cert_path"`
|
||||
ClientCertExpireTimeout *string `mapstructure:"client_cert_token_timeout" required:"false" cty:"client_cert_token_timeout" hcl:"client_cert_token_timeout"`
|
||||
ClientJWT *string `mapstructure:"client_jwt" cty:"client_jwt" hcl:"client_jwt"`
|
||||
ObjectID *string `mapstructure:"object_id" cty:"object_id" hcl:"object_id"`
|
||||
TenantID *string `mapstructure:"tenant_id" required:"false" cty:"tenant_id" hcl:"tenant_id"`
|
||||
|
@ -151,6 +152,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
|||
"client_id": &hcldec.AttrSpec{Name: "client_id", Type: cty.String, Required: false},
|
||||
"client_secret": &hcldec.AttrSpec{Name: "client_secret", Type: cty.String, Required: false},
|
||||
"client_cert_path": &hcldec.AttrSpec{Name: "client_cert_path", Type: cty.String, Required: false},
|
||||
"client_cert_token_timeout": &hcldec.AttrSpec{Name: "client_cert_token_timeout", Type: cty.String, Required: false},
|
||||
"client_jwt": &hcldec.AttrSpec{Name: "client_jwt", Type: cty.String, Required: false},
|
||||
"object_id": &hcldec.AttrSpec{Name: "object_id", Type: cty.String, Required: false},
|
||||
"tenant_id": &hcldec.AttrSpec{Name: "tenant_id", Type: cty.String, Required: false},
|
||||
|
|
|
@ -22,6 +22,7 @@ type FlatConfig struct {
|
|||
ClientID *string `mapstructure:"client_id" cty:"client_id" hcl:"client_id"`
|
||||
ClientSecret *string `mapstructure:"client_secret" cty:"client_secret" hcl:"client_secret"`
|
||||
ClientCertPath *string `mapstructure:"client_cert_path" cty:"client_cert_path" hcl:"client_cert_path"`
|
||||
ClientCertExpireTimeout *string `mapstructure:"client_cert_token_timeout" required:"false" cty:"client_cert_token_timeout" hcl:"client_cert_token_timeout"`
|
||||
ClientJWT *string `mapstructure:"client_jwt" cty:"client_jwt" hcl:"client_jwt"`
|
||||
ObjectID *string `mapstructure:"object_id" cty:"object_id" hcl:"object_id"`
|
||||
TenantID *string `mapstructure:"tenant_id" required:"false" cty:"tenant_id" hcl:"tenant_id"`
|
||||
|
@ -76,6 +77,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
|||
"client_id": &hcldec.AttrSpec{Name: "client_id", Type: cty.String, Required: false},
|
||||
"client_secret": &hcldec.AttrSpec{Name: "client_secret", Type: cty.String, Required: false},
|
||||
"client_cert_path": &hcldec.AttrSpec{Name: "client_cert_path", Type: cty.String, Required: false},
|
||||
"client_cert_token_timeout": &hcldec.AttrSpec{Name: "client_cert_token_timeout", Type: cty.String, Required: false},
|
||||
"client_jwt": &hcldec.AttrSpec{Name: "client_jwt", Type: cty.String, Required: false},
|
||||
"object_id": &hcldec.AttrSpec{Name: "object_id", Type: cty.String, Required: false},
|
||||
"tenant_id": &hcldec.AttrSpec{Name: "tenant_id", Type: cty.String, Required: false},
|
||||
|
|
|
@ -39,6 +39,8 @@ type Config struct {
|
|||
// The path to a pem-encoded certificate that will be used to authenticate
|
||||
// as the specified AAD SP.
|
||||
ClientCertPath string `mapstructure:"client_cert_path"`
|
||||
// The timeout for the JWT Token when using a [client certificate](#client_cert_path). Defaults to 1 hour.
|
||||
ClientCertExpireTimeout time.Duration `mapstructure:"client_cert_token_timeout" required:"false"`
|
||||
// A JWT bearer token for client auth (RFC 7523, Sec. 2.2) that will be used
|
||||
// to authenticate the AAD SP. Provides more control over token the expiration
|
||||
// when using certificate authentication than when using `client_cert_path`.
|
||||
|
@ -163,6 +165,9 @@ func (c Config) Validate(errs *packersdk.MultiError) {
|
|||
if _, err := os.Stat(c.ClientCertPath); err != nil {
|
||||
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("client_cert_path is not an accessible file: %v", err))
|
||||
}
|
||||
if c.ClientCertExpireTimeout < 5*time.Minute {
|
||||
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("client_cert_token_timeout will expire within 5 minutes, please set a value greater than 5 minutes"))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -259,7 +264,7 @@ func (c Config) GetServicePrincipalToken(
|
|||
auth = NewSecretOAuthTokenProvider(*c.cloudEnvironment, c.ClientID, c.ClientSecret, c.TenantID)
|
||||
case authTypeClientCert:
|
||||
say("Getting tokens using client certificate")
|
||||
auth, err = NewCertOAuthTokenProvider(*c.cloudEnvironment, c.ClientID, c.ClientCertPath, c.TenantID)
|
||||
auth, err = NewCertOAuthTokenProvider(*c.cloudEnvironment, c.ClientID, c.ClientCertPath, c.TenantID, c.ClientCertExpireTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -336,6 +341,10 @@ func (c *Config) FillParameters() error {
|
|||
c.TenantID = tenantID
|
||||
}
|
||||
|
||||
if c.ClientCertExpireTimeout == 0 {
|
||||
c.ClientCertExpireTimeout = time.Hour
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -95,6 +95,16 @@ func Test_ClientConfig_RequiredParametersSet(t *testing.T) {
|
|||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "client_cert_token_timeout should be 5 minutes or more",
|
||||
config: Config{
|
||||
SubscriptionID: "ok",
|
||||
ClientID: "ok",
|
||||
ClientCertPath: "/dev/null",
|
||||
ClientCertExpireTimeout: 1 * time.Minute,
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "too many client_* values",
|
||||
config: Config{
|
||||
|
|
|
@ -18,14 +18,14 @@ import (
|
|||
"github.com/hashicorp/packer/builder/azure/pkcs12"
|
||||
)
|
||||
|
||||
func NewCertOAuthTokenProvider(env azure.Environment, clientID, clientCertPath, tenantID string) (oAuthTokenProvider, error) {
|
||||
func NewCertOAuthTokenProvider(env azure.Environment, clientID, clientCertPath, tenantID string, certExpireTimeout time.Duration) (oAuthTokenProvider, error) {
|
||||
cert, key, err := readCert(clientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading certificate: %v", err)
|
||||
}
|
||||
|
||||
audience := fmt.Sprintf("%s%s/oauth2/token", env.ActiveDirectoryEndpoint, tenantID)
|
||||
jwt, err := makeJWT(clientID, audience, cert, key, time.Hour, true)
|
||||
jwt, err := makeJWT(clientID, audience, cert, key, certExpireTimeout, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error generating JWT: %v", err)
|
||||
}
|
||||
|
|
|
@ -49,6 +49,7 @@ type FlatConfig struct {
|
|||
ClientID *string `mapstructure:"client_id" cty:"client_id" hcl:"client_id"`
|
||||
ClientSecret *string `mapstructure:"client_secret" cty:"client_secret" hcl:"client_secret"`
|
||||
ClientCertPath *string `mapstructure:"client_cert_path" cty:"client_cert_path" hcl:"client_cert_path"`
|
||||
ClientCertExpireTimeout *string `mapstructure:"client_cert_token_timeout" required:"false" cty:"client_cert_token_timeout" hcl:"client_cert_token_timeout"`
|
||||
ClientJWT *string `mapstructure:"client_jwt" cty:"client_jwt" hcl:"client_jwt"`
|
||||
ObjectID *string `mapstructure:"object_id" cty:"object_id" hcl:"object_id"`
|
||||
TenantID *string `mapstructure:"tenant_id" required:"false" cty:"tenant_id" hcl:"tenant_id"`
|
||||
|
@ -163,6 +164,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
|||
"client_id": &hcldec.AttrSpec{Name: "client_id", Type: cty.String, Required: false},
|
||||
"client_secret": &hcldec.AttrSpec{Name: "client_secret", Type: cty.String, Required: false},
|
||||
"client_cert_path": &hcldec.AttrSpec{Name: "client_cert_path", Type: cty.String, Required: false},
|
||||
"client_cert_token_timeout": &hcldec.AttrSpec{Name: "client_cert_token_timeout", Type: cty.String, Required: false},
|
||||
"client_jwt": &hcldec.AttrSpec{Name: "client_jwt", Type: cty.String, Required: false},
|
||||
"object_id": &hcldec.AttrSpec{Name: "object_id", Type: cty.String, Required: false},
|
||||
"tenant_id": &hcldec.AttrSpec{Name: "tenant_id", Type: cty.String, Required: false},
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func TestExportArtifact_impl(t *testing.T) {
|
||||
var _ packersdk.Artifact = new(ExportArtifact)
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func TestImportArtifact_impl(t *testing.T) {
|
||||
var _ packersdk.Artifact = new(ImportArtifact)
|
||||
}
|
||||
|
||||
func TestImportArtifactBuilderId(t *testing.T) {
|
||||
a := &ImportArtifact{BuilderIdValue: "foo"}
|
||||
if a.BuilderId() != "foo" {
|
||||
t.Fatalf("bad: %#v", a.BuilderId())
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportArtifactFiles(t *testing.T) {
|
||||
a := &ImportArtifact{}
|
||||
if a.Files() != nil {
|
||||
t.Fatalf("bad: %#v", a.Files())
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportArtifactId(t *testing.T) {
|
||||
a := &ImportArtifact{IdValue: "foo"}
|
||||
if a.Id() != "foo" {
|
||||
t.Fatalf("bad: %#v", a.Id())
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportArtifactDestroy(t *testing.T) {
|
||||
d := new(MockDriver)
|
||||
a := &ImportArtifact{
|
||||
Driver: d,
|
||||
IdValue: "foo",
|
||||
}
|
||||
|
||||
// No error
|
||||
if err := a.Destroy(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if !d.DeleteImageCalled {
|
||||
t.Fatal("delete image should be called")
|
||||
}
|
||||
if d.DeleteImageId != "foo" {
|
||||
t.Fatalf("bad: %#v", d.DeleteImageId)
|
||||
}
|
||||
|
||||
// With an error
|
||||
d.DeleteImageErr = errors.New("foo")
|
||||
if err := a.Destroy(); err != d.DeleteImageErr {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func TestBuilder_implBuilder(t *testing.T) {
|
||||
var _ packersdk.Builder = new(Builder)
|
||||
}
|
|
@ -1,239 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
builderT "github.com/hashicorp/packer-plugin-sdk/acctest"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// RenderConfig helps create dynamic packer template configs for parsing by
|
||||
// builderT without having to write the config to a file.
|
||||
func RenderConfig(builderConfig map[string]interface{}, provisionerConfig []map[string]interface{}) string {
|
||||
// set up basic build template
|
||||
t := map[string][]map[string]interface{}{
|
||||
"builders": {
|
||||
// Setup basic docker config
|
||||
map[string]interface{}{
|
||||
"type": "test",
|
||||
"image": "ubuntu",
|
||||
"discard": true,
|
||||
},
|
||||
},
|
||||
"provisioners": []map[string]interface{}{},
|
||||
}
|
||||
// apply special builder overrides
|
||||
for k, v := range builderConfig {
|
||||
t["builders"][0][k] = v
|
||||
}
|
||||
// Apply special provisioner overrides
|
||||
t["provisioners"] = append(t["provisioners"], provisionerConfig...)
|
||||
|
||||
j, _ := json.Marshal(t)
|
||||
return string(j)
|
||||
}
|
||||
|
||||
// TestUploadDownload verifies that basic upload / download functionality works
|
||||
func TestUploadDownload(t *testing.T) {
|
||||
if os.Getenv("PACKER_ACC") == "" {
|
||||
t.Skip("This test is only run with PACKER_ACC=1")
|
||||
}
|
||||
|
||||
dockerBuilderExtraConfig := map[string]interface{}{
|
||||
"run_command": []string{"-d", "-i", "-t", "{{.Image}}", "/bin/sh"},
|
||||
}
|
||||
|
||||
dockerProvisionerConfig := []map[string]interface{}{
|
||||
{
|
||||
"type": "file",
|
||||
"source": "test-fixtures/onecakes/strawberry",
|
||||
"destination": "/strawberry-cake",
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/strawberry-cake",
|
||||
"destination": "my-strawberry-cake",
|
||||
"direction": "download",
|
||||
},
|
||||
}
|
||||
|
||||
configString := RenderConfig(dockerBuilderExtraConfig, dockerProvisionerConfig)
|
||||
|
||||
// this should be a precheck
|
||||
cmd := exec.Command("docker", "-v")
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Error("docker command not found; please make sure docker is installed")
|
||||
}
|
||||
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
Builder: &Builder{},
|
||||
Template: configString,
|
||||
Check: func(a []packersdk.Artifact) error {
|
||||
// Verify that the thing we downloaded is the same thing we sent up.
|
||||
// Complain loudly if it isn't.
|
||||
inputFile, err := ioutil.ReadFile("test-fixtures/onecakes/strawberry")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read input file: %s", err)
|
||||
}
|
||||
outputFile, err := ioutil.ReadFile("my-strawberry-cake")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read output file: %s", err)
|
||||
}
|
||||
if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) {
|
||||
return fmt.Errorf("Input and output files do not match\n"+
|
||||
"Input:\n%s\nOutput:\n%s\n", inputFile, outputFile)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Teardown: func() error {
|
||||
// Cleanup. Honestly I don't know why you would want to get rid
|
||||
// of my strawberry cake. It's so tasty! Do you not like cake? Are you a
|
||||
// cake-hater? Or are you keeping all the cake all for yourself? So selfish!
|
||||
os.Remove("my-strawberry-cake")
|
||||
return nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestLargeDownload verifies that files are the appropriate size after being
|
||||
// downloaded. This is to identify and fix the race condition in #2793. You may
|
||||
// need to use github.com/cbednarski/rerun to verify since this problem occurs
|
||||
// only intermittently.
|
||||
func TestLargeDownload(t *testing.T) {
|
||||
if os.Getenv("PACKER_ACC") == "" {
|
||||
t.Skip("This test is only run with PACKER_ACC=1")
|
||||
}
|
||||
|
||||
dockerProvisionerConfig := []map[string]interface{}{
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": []string{
|
||||
"dd if=/dev/urandom of=/tmp/cupcake bs=1M count=2",
|
||||
"dd if=/dev/urandom of=/tmp/bigcake bs=1M count=100",
|
||||
"sync",
|
||||
"md5sum /tmp/cupcake /tmp/bigcake",
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/tmp/cupcake",
|
||||
"destination": "cupcake",
|
||||
"direction": "download",
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/tmp/bigcake",
|
||||
"destination": "bigcake",
|
||||
"direction": "download",
|
||||
},
|
||||
}
|
||||
|
||||
configString := RenderConfig(map[string]interface{}{}, dockerProvisionerConfig)
|
||||
|
||||
// this should be a precheck
|
||||
cmd := exec.Command("docker", "-v")
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Error("docker command not found; please make sure docker is installed")
|
||||
}
|
||||
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
Builder: &Builder{},
|
||||
Template: configString,
|
||||
Check: func(a []packersdk.Artifact) error {
|
||||
// Verify that the things we downloaded are the right size. Complain loudly
|
||||
// if they are not.
|
||||
//
|
||||
// cupcake should be 2097152 bytes
|
||||
// bigcake should be 104857600 bytes
|
||||
cupcake, err := os.Stat("cupcake")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to stat cupcake file: %s", err)
|
||||
}
|
||||
cupcakeExpected := int64(2097152)
|
||||
if cupcake.Size() != cupcakeExpected {
|
||||
t.Errorf("Expected cupcake to be %d bytes; found %d", cupcakeExpected, cupcake.Size())
|
||||
}
|
||||
|
||||
bigcake, err := os.Stat("bigcake")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to stat bigcake file: %s", err)
|
||||
}
|
||||
bigcakeExpected := int64(104857600)
|
||||
if bigcake.Size() != bigcakeExpected {
|
||||
t.Errorf("Expected bigcake to be %d bytes; found %d", bigcakeExpected, bigcake.Size())
|
||||
}
|
||||
|
||||
// TODO if we can, calculate a sha inside the container and compare to the
|
||||
// one we get after we pull it down. We will probably have to parse the log
|
||||
// or ui output to do this because we use /dev/urandom to create the file.
|
||||
|
||||
// if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) {
|
||||
// t.Fatalf("Input and output files do not match\n"+
|
||||
// "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile)
|
||||
// }
|
||||
return nil
|
||||
},
|
||||
Teardown: func() error {
|
||||
os.Remove("cupcake")
|
||||
os.Remove("bigcake")
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// TestFixUploadOwner verifies that owner of uploaded files is the user the container is running as.
|
||||
func TestFixUploadOwner(t *testing.T) {
|
||||
if os.Getenv("PACKER_ACC") == "" {
|
||||
t.Skip("This test is only run with PACKER_ACC=1")
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "-v")
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Error("docker command not found; please make sure docker is installed")
|
||||
}
|
||||
|
||||
dockerBuilderExtraConfig := map[string]interface{}{
|
||||
"run_command": []string{"-d", "-i", "-t", "-u", "42", "{{.Image}}", "/bin/sh"},
|
||||
}
|
||||
|
||||
testFixUploadOwnerProvisionersTemplate := []map[string]interface{}{
|
||||
{
|
||||
"type": "file",
|
||||
"source": "test-fixtures/onecakes/strawberry",
|
||||
"destination": "/tmp/strawberry-cake",
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "test-fixtures/manycakes",
|
||||
"destination": "/tmp/",
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": "touch /tmp/testUploadOwner",
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": []string{
|
||||
"[ $(stat -c %u /tmp/strawberry-cake) -eq 42 ] || (echo 'Invalid owner of /tmp/strawberry-cake' && exit 1)",
|
||||
"[ $(stat -c %u /tmp/testUploadOwner) -eq 42 ] || (echo 'Invalid owner of /tmp/testUploadOwner' && exit 1)",
|
||||
"find /tmp/manycakes | xargs -n1 -IFILE /bin/sh -c '[ $(stat -c %u FILE) -eq 42 ] || (echo \"Invalid owner of FILE\" && exit 1)'",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
configString := RenderConfig(dockerBuilderExtraConfig, testFixUploadOwnerProvisionersTemplate)
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
Builder: &Builder{},
|
||||
Template: configString,
|
||||
})
|
||||
}
|
|
@ -1,147 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testConfig() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"export_path": "foo",
|
||||
"image": "bar",
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigStruct(t *testing.T) *Config {
|
||||
var c Config
|
||||
warns, errs := c.Prepare(testConfig())
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", len(warns))
|
||||
}
|
||||
if errs != nil {
|
||||
t.Fatalf("bad: %#v", errs)
|
||||
}
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
func testConfigErr(t *testing.T, warns []string, err error) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should error")
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigOk(t *testing.T, warns []string, err error) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPrepare_exportPath(t *testing.T) {
|
||||
td, err := ioutil.TempDir("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
|
||||
raw := testConfig()
|
||||
|
||||
// No export path. This is invalid. Previously this would not error during
|
||||
// validation and as a result the failure would happen at build time.
|
||||
delete(raw, "export_path")
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Good export path
|
||||
raw["export_path"] = "good"
|
||||
warns, errs = c.Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Bad export path (directory)
|
||||
raw["export_path"] = td
|
||||
warns, errs = c.Prepare(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_exportPathAndCommit(t *testing.T) {
|
||||
raw := testConfig()
|
||||
|
||||
// Export but no commit (explicit default)
|
||||
raw["commit"] = false
|
||||
warns, errs := (&Config{}).Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Commit AND export specified (invalid)
|
||||
raw["commit"] = true
|
||||
warns, errs = (&Config{}).Prepare(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Commit but no export
|
||||
delete(raw, "export_path")
|
||||
warns, errs = (&Config{}).Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_exportDiscard(t *testing.T) {
|
||||
raw := testConfig()
|
||||
|
||||
// Export but no discard (explicit default)
|
||||
raw["discard"] = false
|
||||
warns, errs := (&Config{}).Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Discard AND export (invalid)
|
||||
raw["discard"] = true
|
||||
warns, errs = (&Config{}).Prepare(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Discard but no export
|
||||
raw["discard"] = true
|
||||
delete(raw, "export_path")
|
||||
warns, errs = (&Config{}).Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_image(t *testing.T) {
|
||||
raw := testConfig()
|
||||
|
||||
// No image
|
||||
delete(raw, "image")
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Good image
|
||||
raw["image"] = "path"
|
||||
warns, errs = c.Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_pull(t *testing.T) {
|
||||
raw := testConfig()
|
||||
|
||||
// No pull set
|
||||
delete(raw, "pull")
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
if !c.Pull {
|
||||
t.Fatal("should pull by default")
|
||||
}
|
||||
|
||||
// Pull set
|
||||
raw["pull"] = false
|
||||
warns, errs = c.Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
if c.Pull {
|
||||
t.Fatal("should not pull")
|
||||
}
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
package docker
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDockerDriver_impl(t *testing.T) {
|
||||
var _ Driver = new(DockerDriver)
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
package docker
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestMockDriver_impl(t *testing.T) {
|
||||
var _ Driver = new(MockDriver)
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func testStepCommitState(t *testing.T) multistep.StateBag {
|
||||
state := testState(t)
|
||||
state.Put("container_id", "foo")
|
||||
return state
|
||||
}
|
||||
|
||||
func TestStepCommit_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepCommit)
|
||||
}
|
||||
|
||||
func TestStepCommit(t *testing.T) {
|
||||
state := testStepCommitState(t)
|
||||
step := new(StepCommit)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.CommitImageId = "bar"
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we did the right thing
|
||||
if !driver.CommitCalled {
|
||||
t.Fatal("should've called")
|
||||
}
|
||||
|
||||
// verify the ID is saved
|
||||
idRaw, ok := state.GetOk("image_id")
|
||||
if !ok {
|
||||
t.Fatal("should've saved ID")
|
||||
}
|
||||
|
||||
id := idRaw.(string)
|
||||
if id != driver.CommitImageId {
|
||||
t.Fatalf("bad: %#v", id)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepCommit_error(t *testing.T) {
|
||||
state := testStepCommitState(t)
|
||||
step := new(StepCommit)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.CommitErr = errors.New("foo")
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionHalt {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify the ID is not saved
|
||||
if _, ok := state.GetOk("image_id"); ok {
|
||||
t.Fatal("shouldn't save image ID")
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func testStepExportState(t *testing.T) multistep.StateBag {
|
||||
state := testState(t)
|
||||
state.Put("container_id", "foo")
|
||||
return state
|
||||
}
|
||||
|
||||
func TestStepExport_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepExport)
|
||||
}
|
||||
|
||||
func TestStepExport(t *testing.T) {
|
||||
state := testStepExportState(t)
|
||||
step := new(StepExport)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
// Create a tempfile for our output path
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
tf.Close()
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
config.ExportPath = tf.Name()
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.ExportReader = bytes.NewReader([]byte("data!"))
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we did the right thing
|
||||
if !driver.ExportCalled {
|
||||
t.Fatal("should've exported")
|
||||
}
|
||||
if driver.ExportID != "foo" {
|
||||
t.Fatalf("bad: %#v", driver.ExportID)
|
||||
}
|
||||
|
||||
// verify the data exported to the file
|
||||
contents, err := ioutil.ReadFile(tf.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if string(contents) != "data!" {
|
||||
t.Fatalf("bad: %#v", string(contents))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepExport_error(t *testing.T) {
|
||||
state := testStepExportState(t)
|
||||
step := new(StepExport)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
// Create a tempfile for our output path
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
tf.Close()
|
||||
|
||||
if err := os.Remove(tf.Name()); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
config.ExportPath = tf.Name()
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.ExportError = errors.New("foo")
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionHalt {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we have an error
|
||||
if _, ok := state.GetOk("error"); !ok {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// verify we didn't make that file
|
||||
if _, err := os.Stat(tf.Name()); err == nil {
|
||||
t.Fatal("export path shouldn't exist")
|
||||
}
|
||||
}
|
|
@ -1,104 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func TestStepPull_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepPull)
|
||||
}
|
||||
|
||||
func TestStepPull(t *testing.T) {
|
||||
state := testState(t)
|
||||
step := new(StepPull)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we did the right thing
|
||||
if !driver.PullCalled {
|
||||
t.Fatal("should've pulled")
|
||||
}
|
||||
if driver.PullImage != config.Image {
|
||||
t.Fatalf("bad: %#v", driver.PullImage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepPull_error(t *testing.T) {
|
||||
state := testState(t)
|
||||
step := new(StepPull)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.PullError = errors.New("foo")
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionHalt {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we have an error
|
||||
if _, ok := state.GetOk("error"); !ok {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepPull_login(t *testing.T) {
|
||||
state := testState(t)
|
||||
step := new(StepPull)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
|
||||
config.Login = true
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we pulled
|
||||
if !driver.PullCalled {
|
||||
t.Fatal("should've pulled")
|
||||
}
|
||||
|
||||
// verify we logged in
|
||||
if !driver.LoginCalled {
|
||||
t.Fatal("should've logged in")
|
||||
}
|
||||
if !driver.LogoutCalled {
|
||||
t.Fatal("should've logged out")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepPull_noPull(t *testing.T) {
|
||||
state := testState(t)
|
||||
step := new(StepPull)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
config.Pull = false
|
||||
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we did the right thing
|
||||
if driver.PullCalled {
|
||||
t.Fatal("shouldn't have pulled")
|
||||
}
|
||||
}
|
|
@ -1,97 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func testStepRunState(t *testing.T) multistep.StateBag {
|
||||
state := testState(t)
|
||||
state.Put("temp_dir", "/foo")
|
||||
return state
|
||||
}
|
||||
|
||||
func TestStepRun_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepRun)
|
||||
}
|
||||
|
||||
func TestStepRun(t *testing.T) {
|
||||
state := testStepRunState(t)
|
||||
step := new(StepRun)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
config := state.Get("config").(*Config)
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.StartID = "foo"
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify we did the right thing
|
||||
if !driver.StartCalled {
|
||||
t.Fatal("should've called")
|
||||
}
|
||||
if driver.StartConfig.Image != config.Image {
|
||||
t.Fatalf("bad: %#v", driver.StartConfig.Image)
|
||||
}
|
||||
|
||||
// verify the ID is saved
|
||||
idRaw, ok := state.GetOk("container_id")
|
||||
if !ok {
|
||||
t.Fatal("should've saved ID")
|
||||
}
|
||||
|
||||
id := idRaw.(string)
|
||||
if id != "foo" {
|
||||
t.Fatalf("bad: %#v", id)
|
||||
}
|
||||
|
||||
// Verify we haven't called stop yet
|
||||
if driver.KillCalled {
|
||||
t.Fatal("should not have stopped")
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
step.Cleanup(state)
|
||||
if !driver.KillCalled {
|
||||
t.Fatal("should've stopped")
|
||||
}
|
||||
if driver.KillID != id {
|
||||
t.Fatalf("bad: %#v", driver.StopID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepRun_error(t *testing.T) {
|
||||
state := testStepRunState(t)
|
||||
step := new(StepRun)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.StartError = errors.New("foo")
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionHalt {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// verify the ID is not saved
|
||||
if _, ok := state.GetOk("container_id"); ok {
|
||||
t.Fatal("shouldn't save container ID")
|
||||
}
|
||||
|
||||
// Verify we haven't called stop yet
|
||||
if driver.KillCalled {
|
||||
t.Fatal("should not have stopped")
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
step.Cleanup(state)
|
||||
if driver.KillCalled {
|
||||
t.Fatal("should not have stopped")
|
||||
}
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
)
|
||||
|
||||
func TestStepSetGeneratedData_Run(t *testing.T) {
|
||||
state := testState(t)
|
||||
step := new(StepSetGeneratedData)
|
||||
step.GeneratedData = &packerbuilderdata.GeneratedData{State: state}
|
||||
driver := state.Get("driver").(*MockDriver)
|
||||
driver.Sha256Result = "80B3BB1B1696E73A9B19DEEF92F664F8979F948DF348088B61F9A3477655AF64"
|
||||
state.Put("image_id", "12345")
|
||||
|
||||
if action := step.Run(context.TODO(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("Should not halt")
|
||||
}
|
||||
if !driver.Sha256Called {
|
||||
t.Fatalf("driver.SHA256 should be called")
|
||||
}
|
||||
if driver.Sha256Id != "12345" {
|
||||
t.Fatalf("driver.SHA256 got wrong image it: %s", driver.Sha256Id)
|
||||
}
|
||||
genData := state.Get("generated_data").(map[string]interface{})
|
||||
imgSha256 := genData["ImageSha256"].(string)
|
||||
if imgSha256 != driver.Sha256Result {
|
||||
t.Fatalf("Expected ImageSha256 to be %s but was %s", driver.Sha256Result, imgSha256)
|
||||
}
|
||||
|
||||
// Image ID not implement
|
||||
state = testState(t)
|
||||
step.GeneratedData = &packerbuilderdata.GeneratedData{State: state}
|
||||
driver = state.Get("driver").(*MockDriver)
|
||||
notImplementedMsg := "ERR_IMAGE_SHA256_NOT_FOUND"
|
||||
|
||||
if action := step.Run(context.TODO(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("Should not halt")
|
||||
}
|
||||
if driver.Sha256Called {
|
||||
t.Fatalf("driver.SHA256 should not be called")
|
||||
}
|
||||
genData = state.Get("generated_data").(map[string]interface{})
|
||||
imgSha256 = genData["ImageSha256"].(string)
|
||||
if imgSha256 != notImplementedMsg {
|
||||
t.Fatalf("Expected ImageSha256 to be %s but was %s", notImplementedMsg, imgSha256)
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func TestStepTempDir_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepTempDir)
|
||||
}
|
||||
|
||||
func testStepTempDir_impl(t *testing.T) string {
|
||||
state := testState(t)
|
||||
step := new(StepTempDir)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
// sanity test
|
||||
if _, ok := state.GetOk("temp_dir"); ok {
|
||||
t.Fatalf("temp_dir should not be in state yet")
|
||||
}
|
||||
|
||||
// run the step
|
||||
if action := step.Run(context.Background(), state); action != multistep.ActionContinue {
|
||||
t.Fatalf("bad action: %#v", action)
|
||||
}
|
||||
|
||||
// Verify that we got the temp dir
|
||||
dirRaw, ok := state.GetOk("temp_dir")
|
||||
if !ok {
|
||||
t.Fatalf("should've made temp_dir")
|
||||
}
|
||||
dir := dirRaw.(string)
|
||||
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
t.Fatalf("Stat for %s failed: err: %s", err, dir)
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
step.Cleanup(state)
|
||||
if _, err := os.Stat(dir); err == nil {
|
||||
t.Fatalf("dir should be gone")
|
||||
}
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func TestStepTempDir(t *testing.T) {
|
||||
testStepTempDir_impl(t)
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func testState(t *testing.T) multistep.StateBag {
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("config", testConfigStruct(t))
|
||||
state.Put("driver", &MockDriver{})
|
||||
state.Put("hook", &packersdk.MockHook{})
|
||||
state.Put("ui", &packersdk.BasicUi{
|
||||
Reader: new(bytes.Buffer),
|
||||
Writer: new(bytes.Buffer),
|
||||
})
|
||||
return state
|
||||
}
|
|
@ -1 +0,0 @@
chocolate!
@ -1 +0,0 @@
vanilla!
@ -1 +0,0 @@
strawberry!
@ -1,13 +0,0 @@
|
|||
package version
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/version"
|
||||
packerVersion "github.com/hashicorp/packer/version"
|
||||
)
|
||||
|
||||
var DigitalOceanPluginVersion *version.PluginVersion
|
||||
|
||||
func init() {
|
||||
DigitalOceanPluginVersion = version.InitializePluginVersion(
|
||||
packerVersion.Version, packerVersion.VersionPrerelease)
|
||||
}
|
|
@ -210,6 +210,8 @@ type Config struct {
|
|||
// - The contents of the script file will be wrapped in Packer's startup script wrapper, unless `wrap_startup_script` is disabled. See `wrap_startup_script` for more details.
|
||||
// - Not supported by Windows instances. See [Startup Scripts for Windows](https://cloud.google.com/compute/docs/startupscript#providing_a_startup_script_for_windows_instances) for more details.
|
||||
StartupScriptFile string `mapstructure:"startup_script_file" required:"false"`
|
||||
// The time to wait for windows password to be retrieved. Defaults to "3m".
|
||||
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
|
||||
// For backwards compatibility this option defaults to `"true"` in the future it will default to `"false"`.
|
||||
// If "true", the contents of `startup_script_file` or `"startup_script"` in the instance metadata
|
||||
// is wrapped in a Packer specific script that tracks the execution and completion of the provided
|
||||
|
@ -542,6 +544,10 @@ func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
|
|||
c.WrapStartupScriptFile = config.TriTrue
|
||||
}
|
||||
}
|
||||
// Check windows password timeout is provided
|
||||
if c.WindowsPasswordTimeout == 0 {
|
||||
c.WindowsPasswordTimeout = 3 * time.Minute
|
||||
}
|
||||
|
||||
// Check for any errors.
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
|
|
|
@ -112,6 +112,7 @@ type FlatConfig struct {
|
|||
SourceImageFamily *string `mapstructure:"source_image_family" required:"true" cty:"source_image_family" hcl:"source_image_family"`
|
||||
SourceImageProjectId []string `mapstructure:"source_image_project_id" required:"false" cty:"source_image_project_id" hcl:"source_image_project_id"`
|
||||
StartupScriptFile *string `mapstructure:"startup_script_file" required:"false" cty:"startup_script_file" hcl:"startup_script_file"`
|
||||
WindowsPasswordTimeout *string `mapstructure:"windows_password_timeout" required:"false" cty:"windows_password_timeout" hcl:"windows_password_timeout"`
|
||||
WrapStartupScriptFile *bool `mapstructure:"wrap_startup_script" required:"false" cty:"wrap_startup_script" hcl:"wrap_startup_script"`
|
||||
Subnetwork *string `mapstructure:"subnetwork" required:"false" cty:"subnetwork" hcl:"subnetwork"`
|
||||
Tags []string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
|
||||
|
@ -236,6 +237,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
|||
"source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false},
|
||||
"source_image_project_id": &hcldec.AttrSpec{Name: "source_image_project_id", Type: cty.List(cty.String), Required: false},
|
||||
"startup_script_file": &hcldec.AttrSpec{Name: "startup_script_file", Type: cty.String, Required: false},
|
||||
"windows_password_timeout": &hcldec.AttrSpec{Name: "windows_password_timeout", Type: cty.String, Required: false},
|
||||
"wrap_startup_script": &hcldec.AttrSpec{Name: "wrap_startup_script", Type: cty.Bool, Required: false},
|
||||
"subnetwork": &hcldec.AttrSpec{Name: "subnetwork", Type: cty.String, Required: false},
|
||||
"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.List(cty.String), Required: false},
|
||||
|
|
|
@ -107,13 +107,14 @@ type InstanceConfig struct {
|
|||
// WindowsPasswordConfig is the data structure that GCE needs to encrypt the created
|
||||
// windows password.
|
||||
type WindowsPasswordConfig struct {
|
||||
key *rsa.PrivateKey
|
||||
password string
|
||||
UserName string `json:"userName"`
|
||||
Modulus string `json:"modulus"`
|
||||
Exponent string `json:"exponent"`
|
||||
Email string `json:"email"`
|
||||
ExpireOn time.Time `json:"expireOn"`
|
||||
key *rsa.PrivateKey
|
||||
password string
|
||||
UserName string `json:"userName"`
|
||||
Modulus string `json:"modulus"`
|
||||
Exponent string `json:"exponent"`
|
||||
Email string `json:"email"`
|
||||
ExpireOn time.Time `json:"expireOn"`
|
||||
WindowsPasswordTimeout time.Duration `json:"timeout"`
|
||||
}
|
||||
|
||||
type windowsPasswordResponse struct {
|
||||
|
|
|
@ -253,7 +253,6 @@ func (d *driverGCE) GetImage(name string, fromFamily bool) (*Image, error) {
|
|||
"ubuntu-os-cloud",
|
||||
"windows-cloud",
|
||||
"windows-sql-cloud",
|
||||
"gce-uefi-images",
|
||||
"gce-nvme",
|
||||
// misc
|
||||
"google-containers",
|
||||
|
@ -568,7 +567,7 @@ func (d *driverGCE) createWindowsPassword(errCh chan<- error, name, zone string,
|
|||
return
|
||||
}
|
||||
|
||||
timeout := time.Now().Add(time.Minute * 3)
|
||||
timeout := time.Now().Add(c.WindowsPasswordTimeout)
|
||||
hash := sha1.New()
|
||||
random := rand.Reader
|
||||
|
||||
|
|
|
@ -60,12 +60,13 @@ func (s *StepCreateWindowsPassword) Run(ctx context.Context, state multistep.Sta
|
|||
}
|
||||
|
||||
data := WindowsPasswordConfig{
|
||||
key: priv,
|
||||
UserName: c.Comm.WinRMUser,
|
||||
Modulus: base64.StdEncoding.EncodeToString(priv.N.Bytes()),
|
||||
Exponent: base64.StdEncoding.EncodeToString(buf[1:]),
|
||||
Email: email,
|
||||
ExpireOn: time.Now().Add(time.Minute * 5),
|
||||
key: priv,
|
||||
UserName: c.Comm.WinRMUser,
|
||||
Modulus: base64.StdEncoding.EncodeToString(priv.N.Bytes()),
|
||||
Exponent: base64.StdEncoding.EncodeToString(buf[1:]),
|
||||
Email: email,
|
||||
ExpireOn: time.Now().Add(time.Minute * 5),
|
||||
WindowsPasswordTimeout: c.WindowsPasswordTimeout,
|
||||
}
|
||||
|
||||
if s.Debug {
|
||||
|
@ -98,7 +99,7 @@ func (s *StepCreateWindowsPassword) Run(ctx context.Context, state multistep.Sta
|
|||
ui.Message("Waiting for windows password to complete...")
|
||||
select {
|
||||
case err = <-errCh:
|
||||
case <-time.After(c.StateTimeout):
|
||||
case <-time.After(c.WindowsPasswordTimeout):
|
||||
err = errors.New("time out while waiting for the password to be created")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -294,7 +294,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
|
|||
// configure the communicator ssh, winrm
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.SSHConfig.Comm,
|
||||
Host: hypervcommon.CommHost(b.config.SSHConfig.Comm.SSHHost),
|
||||
Host: hypervcommon.CommHost(b.config.SSHConfig.Comm.Host()),
|
||||
SSHConfig: b.config.SSHConfig.Comm.SSHConfigFunc(),
|
||||
},
|
||||
|
||||
|
|
|
@ -334,7 +334,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
|
|||
// configure the communicator ssh, winrm
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.SSHConfig.Comm,
|
||||
Host: hypervcommon.CommHost(b.config.SSHConfig.Comm.SSHHost),
|
||||
Host: hypervcommon.CommHost(b.config.SSHConfig.Comm.Host()),
|
||||
SSHConfig: b.config.SSHConfig.Comm.SSHConfigFunc(),
|
||||
},
|
||||
|
||||
|
|
|
@ -151,7 +151,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
|
|||
&communicator.StepConnect{
|
||||
Config: &b.config.RunConfig.Comm,
|
||||
Host: CommHost(
|
||||
b.config.RunConfig.Comm.SSHHost,
|
||||
b.config.RunConfig.Comm.Host(),
|
||||
computeClient,
|
||||
b.config.SSHInterface,
|
||||
b.config.SSHIPVersion),
|
||||
|
|
|
@ -4,9 +4,14 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/common"
|
||||
core "github.com/oracle/oci-go-sdk/core"
|
||||
)
|
||||
|
||||
|
@ -19,6 +24,31 @@ type driverOCI struct {
|
|||
context context.Context
|
||||
}
|
||||
|
||||
var retryPolicy = &common.RetryPolicy{
|
||||
MaximumNumberAttempts: 10,
|
||||
ShouldRetryOperation: func(res common.OCIOperationResponse) bool {
|
||||
var e common.ServiceError
|
||||
if errors.As(res.Error, &e) {
|
||||
switch e.GetHTTPStatusCode() {
|
||||
case http.StatusTooManyRequests, http.StatusInternalServerError, http.StatusServiceUnavailable:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
},
|
||||
NextDuration: func(res common.OCIOperationResponse) time.Duration {
|
||||
x := uint64(res.AttemptNumber)
|
||||
d := time.Duration(math.Pow(2, float64(atomic.LoadUint64(&x)))) * time.Second
|
||||
j := time.Duration(rand.Float64()*(2000)) * time.Millisecond
|
||||
w := d + j
|
||||
return w
|
||||
},
|
||||
}
|
||||
|
||||
var requestMetadata = common.RequestMetadata{
|
||||
RetryPolicy: retryPolicy,
|
||||
}
|
||||
|
||||
// NewDriverOCI Creates a new driverOCI with a connected compute client and a connected vcn client.
|
||||
func NewDriverOCI(cfg *Config) (Driver, error) {
|
||||
coreClient, err := core.NewComputeClientWithConfigurationProvider(cfg.configProvider)
|
||||
|
@ -80,6 +110,7 @@ func (d *driverOCI) CreateInstance(ctx context.Context, publicKey string) (strin
|
|||
LifecycleState: "AVAILABLE",
|
||||
SortBy: "TIMECREATED",
|
||||
SortOrder: "DESC",
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -127,7 +158,10 @@ func (d *driverOCI) CreateInstance(ctx context.Context, publicKey string) (strin
|
|||
Metadata: metadata,
|
||||
}
|
||||
|
||||
instance, err := d.computeClient.LaunchInstance(context.TODO(), core.LaunchInstanceRequest{LaunchInstanceDetails: instanceDetails})
|
||||
instance, err := d.computeClient.LaunchInstance(context.TODO(), core.LaunchInstanceRequest{
|
||||
LaunchInstanceDetails: instanceDetails,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -145,7 +179,9 @@ func (d *driverOCI) CreateImage(ctx context.Context, id string) (core.Image, err
|
|||
FreeformTags: d.cfg.Tags,
|
||||
DefinedTags: d.cfg.DefinedTags,
|
||||
LaunchMode: core.CreateImageDetailsLaunchModeEnum(d.cfg.LaunchMode),
|
||||
}})
|
||||
},
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return core.Image{}, err
|
||||
|
@ -156,15 +192,19 @@ func (d *driverOCI) CreateImage(ctx context.Context, id string) (core.Image, err
|
|||
|
||||
// DeleteImage deletes a custom image.
|
||||
func (d *driverOCI) DeleteImage(ctx context.Context, id string) error {
|
||||
_, err := d.computeClient.DeleteImage(ctx, core.DeleteImageRequest{ImageId: &id})
|
||||
_, err := d.computeClient.DeleteImage(ctx, core.DeleteImageRequest{
|
||||
ImageId: &id,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// GetInstanceIP returns the public or private IP corresponding to the given instance id.
|
||||
func (d *driverOCI) GetInstanceIP(ctx context.Context, id string) (string, error) {
|
||||
vnics, err := d.computeClient.ListVnicAttachments(ctx, core.ListVnicAttachmentsRequest{
|
||||
InstanceId: &id,
|
||||
CompartmentId: &d.cfg.CompartmentID,
|
||||
InstanceId: &id,
|
||||
CompartmentId: &d.cfg.CompartmentID,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -174,7 +214,10 @@ func (d *driverOCI) GetInstanceIP(ctx context.Context, id string) (string, error
|
|||
return "", errors.New("instance has zero VNICs")
|
||||
}
|
||||
|
||||
vnic, err := d.vcnClient.GetVnic(ctx, core.GetVnicRequest{VnicId: vnics.Items[0].VnicId})
|
||||
vnic, err := d.vcnClient.GetVnic(ctx, core.GetVnicRequest{
|
||||
VnicId: vnics.Items[0].VnicId,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error getting VNIC details: %s", err)
|
||||
}
|
||||
|
@ -192,7 +235,8 @@ func (d *driverOCI) GetInstanceIP(ctx context.Context, id string) (string, error
|
|||
|
||||
func (d *driverOCI) GetInstanceInitialCredentials(ctx context.Context, id string) (string, string, error) {
|
||||
credentials, err := d.computeClient.GetWindowsInstanceInitialCredentials(ctx, core.GetWindowsInstanceInitialCredentialsRequest{
|
||||
InstanceId: &id,
|
||||
InstanceId: &id,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
|
@ -204,7 +248,8 @@ func (d *driverOCI) GetInstanceInitialCredentials(ctx context.Context, id string
|
|||
// TerminateInstance terminates a compute instance.
|
||||
func (d *driverOCI) TerminateInstance(ctx context.Context, id string) error {
|
||||
_, err := d.computeClient.TerminateInstance(ctx, core.TerminateInstanceRequest{
|
||||
InstanceId: &id,
|
||||
InstanceId: &id,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
@ -214,7 +259,10 @@ func (d *driverOCI) TerminateInstance(ctx context.Context, id string) error {
|
|||
func (d *driverOCI) WaitForImageCreation(ctx context.Context, id string) error {
|
||||
return waitForResourceToReachState(
|
||||
func(string) (string, error) {
|
||||
image, err := d.computeClient.GetImage(ctx, core.GetImageRequest{ImageId: &id})
|
||||
image, err := d.computeClient.GetImage(ctx, core.GetImageRequest{
|
||||
ImageId: &id,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -233,7 +281,10 @@ func (d *driverOCI) WaitForImageCreation(ctx context.Context, id string) error {
|
|||
func (d *driverOCI) WaitForInstanceState(ctx context.Context, id string, waitStates []string, terminalState string) error {
|
||||
return waitForResourceToReachState(
|
||||
func(string) (string, error) {
|
||||
instance, err := d.computeClient.GetInstance(ctx, core.GetInstanceRequest{InstanceId: &id})
|
||||
instance, err := d.computeClient.GetInstance(ctx, core.GetInstanceRequest{
|
||||
InstanceId: &id,
|
||||
RequestMetadata: requestMetadata,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@@ -238,7 +238,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 		},
 		&communicator.StepConnect{
 			Config:    &b.config.SSHConfig.Comm,
-			Host:      parallelscommon.CommHost(b.config.SSHConfig.Comm.SSHHost),
+			Host:      parallelscommon.CommHost(b.config.SSHConfig.Comm.Host()),
 			SSHConfig: b.config.SSHConfig.Comm.SSHConfigFunc(),
 		},
 		&parallelscommon.StepUploadVersion{

@@ -87,7 +87,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 		},
 		&communicator.StepConnect{
 			Config:    &b.config.SSHConfig.Comm,
-			Host:      parallelscommon.CommHost(b.config.SSHConfig.Comm.SSHHost),
+			Host:      parallelscommon.CommHost(b.config.SSHConfig.Comm.Host()),
 			SSHConfig: b.config.SSHConfig.Comm.SSHConfigFunc(),
 		},
 		&parallelscommon.StepUploadVersion{
@@ -48,6 +48,7 @@ func (c *CommConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs

 	if c.Comm.SSHHost == "" && c.SkipNatMapping {
 		c.Comm.SSHHost = "127.0.0.1"
+		c.Comm.WinRMHost = "127.0.0.1"
 	}

 	if c.HostPortMin == 0 {
@@ -108,6 +108,12 @@ type Config struct {
 	// The number of cpus to use when building the VM.
 	// The default is `1` CPU.
 	CpuCount int `mapstructure:"cpus" required:"false"`
+	// The firmware file to be used by QEMU, which is to be set by the -bios
+	// option of QEMU. Particularly, this option can be set to use EFI instead
+	// of BIOS, by using "OVMF.fd" from OpenFirmware.
+	// If unset, no -bios option is passed to QEMU, using the default of QEMU.
+	// Also see the QEMU documentation.
+	Firmware string `mapstructure:"firmware" required:"false"`
 	// The interface to use for the disk. Allowed values include any of `ide`,
 	// `scsi`, `virtio` or `virtio-scsi`^\*. Note also that any boot commands
 	// or kickstart type scripts must have proper adjustments for resulting

@@ -98,6 +98,7 @@ type FlatConfig struct {
 	Accelerator        *string  `mapstructure:"accelerator" required:"false" cty:"accelerator" hcl:"accelerator"`
 	AdditionalDiskSize []string `mapstructure:"disk_additional_size" required:"false" cty:"disk_additional_size" hcl:"disk_additional_size"`
 	CpuCount           *int     `mapstructure:"cpus" required:"false" cty:"cpus" hcl:"cpus"`
+	Firmware           *string  `mapstructure:"firmware" required:"false" cty:"firmware" hcl:"firmware"`
 	DiskInterface      *string  `mapstructure:"disk_interface" required:"false" cty:"disk_interface" hcl:"disk_interface"`
 	DiskSize           *string  `mapstructure:"disk_size" required:"false" cty:"disk_size" hcl:"disk_size"`
 	SkipResizeDisk     *bool    `mapstructure:"skip_resize_disk" required:"false" cty:"skip_resize_disk" hcl:"skip_resize_disk"`

@@ -231,6 +232,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
 	"accelerator":          &hcldec.AttrSpec{Name: "accelerator", Type: cty.String, Required: false},
 	"disk_additional_size": &hcldec.AttrSpec{Name: "disk_additional_size", Type: cty.List(cty.String), Required: false},
 	"cpus":                 &hcldec.AttrSpec{Name: "cpus", Type: cty.Number, Required: false},
+	"firmware":             &hcldec.AttrSpec{Name: "firmware", Type: cty.String, Required: false},
 	"disk_interface":       &hcldec.AttrSpec{Name: "disk_interface", Type: cty.String, Required: false},
 	"disk_size":            &hcldec.AttrSpec{Name: "disk_size", Type: cty.String, Required: false},
 	"skip_resize_disk":     &hcldec.AttrSpec{Name: "skip_resize_disk", Type: cty.Bool, Required: false},
@@ -104,6 +104,11 @@ func (s *stepRun) getDefaultArgs(config *Config, state multistep.StateBag) map[s
 			config.MachineType, config.Accelerator)
 	}

+	// Firmware
+	if config.Firmware != "" {
+		defaultArgs["-bios"] = config.Firmware
+	}
+
 	// Configure "-netdev" arguments
 	defaultArgs["-netdev"] = fmt.Sprintf("bridge,id=user.0,br=%s", config.NetBridge)
 	if config.NetBridge == "" {
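For orientation, the only behavioural change in this step is that a `-bios` entry is added to the default-arguments map when `firmware` is set. A rough, self-contained sketch of how such a map could be flattened into a QEMU flag list (the helper below is illustrative only, not the builder's actual code):

```go
package main

import (
	"fmt"
	"sort"
)

// flatten adds "-bios" only when a firmware path was configured, then turns
// the map into a flag/value list; sorting is just for a deterministic example.
func flatten(firmware string, defaultArgs map[string]string) []string {
	if firmware != "" {
		defaultArgs["-bios"] = firmware
	}
	keys := make([]string, 0, len(defaultArgs))
	for k := range defaultArgs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	out := []string{}
	for _, k := range keys {
		out = append(out, k, defaultArgs[k])
	}
	return out
}

func main() {
	fmt.Println(flatten("OVMF.fd", map[string]string{"-m": "512M"}))
	// [-bios OVMF.fd -m 512M]
}
```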
@@ -74,8 +74,10 @@ type Config struct {

 	RemoveVolume bool `mapstructure:"remove_volume"`

-	UserAgent string `mapstructure-to-hcl2:",skip"`
-	ctx       interpolate.Context
+	// Shutdown timeout. Defaults to 5m.
+	ShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
+	UserAgent       string `mapstructure-to-hcl2:",skip"`
+	ctx             interpolate.Context

 	// Deprecated configs

@@ -255,6 +257,10 @@ func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
 			errs, errors.New("image is required"))
 	}

+	if c.ShutdownTimeout == "" {
+		c.ShutdownTimeout = "5m"
+	}
+
 	if errs != nil && len(errs.Errors) > 0 {
 		return warnings, errs
 	}

@@ -80,6 +80,7 @@ type FlatConfig struct {
 	Bootscript   *string `mapstructure:"bootscript" required:"false" cty:"bootscript" hcl:"bootscript"`
 	BootType     *string `mapstructure:"boottype" required:"false" cty:"boottype" hcl:"boottype"`
 	RemoveVolume *bool   `mapstructure:"remove_volume" cty:"remove_volume" hcl:"remove_volume"`
+	ShutdownTimeout *string `mapstructure:"shutdown_timeout" required:"false" cty:"shutdown_timeout" hcl:"shutdown_timeout"`
 	Token        *string `mapstructure:"api_token" required:"false" cty:"api_token" hcl:"api_token"`
 	Organization *string `mapstructure:"organization_id" required:"false" cty:"organization_id" hcl:"organization_id"`
 	Region       *string `mapstructure:"region" required:"false" cty:"region" hcl:"region"`

@@ -167,6 +168,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
 	"bootscript":    &hcldec.AttrSpec{Name: "bootscript", Type: cty.String, Required: false},
 	"boottype":      &hcldec.AttrSpec{Name: "boottype", Type: cty.String, Required: false},
 	"remove_volume": &hcldec.AttrSpec{Name: "remove_volume", Type: cty.Bool, Required: false},
+	"shutdown_timeout": &hcldec.AttrSpec{Name: "shutdown_timeout", Type: cty.String, Required: false},
 	"api_token":       &hcldec.AttrSpec{Name: "api_token", Type: cty.String, Required: false},
 	"organization_id": &hcldec.AttrSpec{Name: "organization_id", Type: cty.String, Required: false},
 	"region":          &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
@@ -3,6 +3,7 @@ package scaleway
 import (
 	"context"
 	"fmt"
+	"time"

 	"github.com/hashicorp/packer-plugin-sdk/multistep"
 	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"

@@ -30,9 +31,23 @@ func (s *stepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
 		return multistep.ActionHalt
 	}

-	instanceResp, err := instanceAPI.WaitForServer(&instance.WaitForServerRequest{
+	waitRequest := &instance.WaitForServerRequest{
 		ServerID: serverID,
-	})
+	}
+	c := state.Get("config").(*Config)
+	timeout := c.ShutdownTimeout
+	duration, err := time.ParseDuration(timeout)
+	if err != nil {
+		err := fmt.Errorf("error: %s could not parse string %s as a duration", err, timeout)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	if timeout != "" {
+		waitRequest.Timeout = scw.TimeDurationPtr(duration)
+	}
+
+	instanceResp, err := instanceAPI.WaitForServer(waitRequest)
 	if err != nil {
 		err := fmt.Errorf("Error shutting down server: %s", err)
 		state.Put("error", err)
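A quick standalone illustration of the shutdown_timeout handling introduced above: an empty value falls back to the 5m default set in Config.Prepare, anything else must parse as a Go duration. This sketch only restates the pattern; the function name is mine, not the builder's:

```go
package main

import (
	"fmt"
	"time"
)

// parseShutdownTimeout mirrors the handling above: empty means "use the 5m
// default", otherwise the string must be a valid Go duration such as "90s".
func parseShutdownTimeout(raw string) (time.Duration, error) {
	if raw == "" {
		raw = "5m"
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		return 0, fmt.Errorf("could not parse %q as a duration: %w", raw, err)
	}
	return d, nil
}

func main() {
	d, err := parseShutdownTimeout("90s")
	fmt.Println(d, err) // 1m30s <nil>
}
```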
@@ -41,17 +41,23 @@ func (s *StepSSHConfig) Run(ctx context.Context, state multistep.StateBag) multi
 		return multistep.ActionHalt
 	}

-	config.Comm.SSHHost = sshConfig.Hostname
-	port, err := strconv.Atoi(sshConfig.Port)
-	if err != nil {
-		state.Put("error", err)
-		return multistep.ActionHalt
+	if config.Comm.SSHHost == "" {
+		config.Comm.SSHHost = sshConfig.Hostname
+	}
+	if config.Comm.SSHPort == 0 {
+		port, err := strconv.Atoi(sshConfig.Port)
+		if err != nil {
+			state.Put("error", err)
+			return multistep.ActionHalt
+		}
+		config.Comm.SSHPort = port
 	}
-	config.Comm.SSHPort = port

 	if config.Comm.SSHUsername != "" {
 		// If user has set the username within the communicator, use the
-		// auth provided there.
+		// username, password, and/or keyfile auth provided there.
 		log.Printf("Overriding SSH config from Vagrant with the username, " +
 			"password, and private key information provided to the Packer template.")
 		return multistep.ActionContinue
 	}
 	log.Printf("identity file is %s", sshConfig.IdentityFile)
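The hunk above turns the values discovered from `vagrant ssh-config` into fallbacks that only apply when no override was given in the template. In isolation, the pattern looks like this (illustrative values, not builder code):

```go
package main

import "fmt"

// pick returns the template override when it is set, otherwise the value
// discovered from `vagrant ssh-config`, mirroring the host handling above.
func pick(override, discovered string) string {
	if override != "" {
		return override
	}
	return discovered
}

func main() {
	fmt.Println(pick("", "127.0.0.1"))            // no override: Vagrant's host wins
	fmt.Println(pick("123.45.67.8", "127.0.0.1")) // template override wins
}
```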
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"

+	"github.com/hashicorp/packer-plugin-sdk/communicator"
 	"github.com/hashicorp/packer-plugin-sdk/multistep"
 )

@@ -15,6 +16,83 @@ func TestStepSSHConfig_Impl(t *testing.T) {
 	}
 }

+func TestPrepStepSSHConfig_sshOverrides(t *testing.T) {
+	type testcase struct {
+		name              string
+		inputSSHConfig    communicator.SSH
+		expectedSSHConfig communicator.SSH
+	}
+	tcs := []testcase{
+		{
+			// defaults to overriding with the ssh config from vagrant
+			name:           "default",
+			inputSSHConfig: communicator.SSH{},
+			expectedSSHConfig: communicator.SSH{
+				SSHHost:     "127.0.0.1",
+				SSHPort:     2222,
+				SSHUsername: "vagrant",
+				SSHPassword: "",
+			},
+		},
+		{
+			// respects SSH host and port overrides independent of credential
+			// overrides
+			name: "host_override",
+			inputSSHConfig: communicator.SSH{
+				SSHHost: "123.45.67.8",
+				SSHPort: 1234,
+			},
+			expectedSSHConfig: communicator.SSH{
+				SSHHost:     "123.45.67.8",
+				SSHPort:     1234,
+				SSHUsername: "vagrant",
+				SSHPassword: "",
+			},
+		},
+		{
+			// respects credential overrides
+			name: "credential_override",
+			inputSSHConfig: communicator.SSH{
+				SSHUsername: "megan",
+				SSHPassword: "SoSecure",
+			},
+			expectedSSHConfig: communicator.SSH{
+				SSHHost:     "127.0.0.1",
+				SSHPort:     2222,
+				SSHUsername: "megan",
+				SSHPassword: "SoSecure",
+			},
+		},
+	}
+	for _, tc := range tcs {
+		driver := &MockVagrantDriver{}
+		config := &Config{
+			Comm: communicator.Config{
+				SSH: tc.inputSSHConfig,
+			},
+		}
+		state := new(multistep.BasicStateBag)
+		state.Put("driver", driver)
+		state.Put("config", config)
+
+		step := StepSSHConfig{}
+		_ = step.Run(context.Background(), state)
+
+		if config.Comm.SSHHost != tc.expectedSSHConfig.SSHHost {
+			t.Fatalf("unexpected sshconfig host: name: %s, received %s", tc.name, config.Comm.SSHHost)
+		}
+		if config.Comm.SSHPort != tc.expectedSSHConfig.SSHPort {
+			t.Fatalf("unexpected sshconfig port: name: %s, received %d", tc.name, config.Comm.SSHPort)
+		}
+		if config.Comm.SSHUsername != tc.expectedSSHConfig.SSHUsername {
+			t.Fatalf("unexpected sshconfig SSHUsername: name: %s, received %s", tc.name, config.Comm.SSHUsername)
+		}
+		if config.Comm.SSHPassword != tc.expectedSSHConfig.SSHPassword {
+			t.Fatalf("unexpected sshconfig SSHPassword: name: %s, received %s", tc.name, config.Comm.SSHPassword)
+		}
+	}
+}
+
 func TestPrepStepSSHConfig_GlobalID(t *testing.T) {
 	driver := &MockVagrantDriver{}
 	config := &Config{}
@@ -50,8 +50,9 @@ func (c *CommConfig) Prepare(ctx *interpolate.Context) []error {
 		c.SkipNatMapping = c.SSHSkipNatMapping
 	}

-	if c.Comm.SSHHost == "" {
+	if c.Comm.Host() == "" {
 		c.Comm.SSHHost = "127.0.0.1"
+		c.Comm.WinRMHost = "127.0.0.1"
 	}

 	if c.HostPortMin == 0 {
@@ -22,6 +22,9 @@ type Driver interface {
 	// Create a SCSI controller.
 	CreateSCSIController(vm string, controller string) error

+	// Create a VirtIO controller.
+	CreateVirtIOController(vm string, controller string) error
+
 	// Create an NVME controller
 	CreateNVMeController(vm string, controller string, portcount int) error

@@ -64,7 +64,6 @@ func (d *VBox42Driver) CreateNVMeController(vmName string, name string, portcoun
 }

 func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
-
 	command := []string{
 		"storagectl", vmName,
 		"--name", name,

@@ -75,6 +74,17 @@ func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
 	return d.VBoxManage(command...)
 }

+func (d *VBox42Driver) CreateVirtIOController(vmName string, name string) error {
+	command := []string{
+		"storagectl", vmName,
+		"--name", name,
+		"--add", "VirtIO",
+		"--controller", "VirtIO",
+	}
+
+	return d.VBoxManage(command...)
+}
+
 func (d *VBox42Driver) RemoveFloppyControllers(vmName string) error {
 	var stdout bytes.Buffer
@@ -13,6 +13,10 @@ type DriverMock struct {
 	CreateSCSIControllerController string
 	CreateSCSIControllerErr        error

+	CreateVirtIOControllerVM         string
+	CreateVirtIOControllerController string
+	CreateVirtIOControllerErr        error
+
 	CreateNVMeControllerVM         string
 	CreateNVMeControllerController string
 	CreateNVMeControllerErr        error

@@ -78,6 +82,12 @@ func (d *DriverMock) CreateSCSIController(vm string, controller string) error {
 	return d.CreateSCSIControllerErr
 }

+func (d *DriverMock) CreateVirtIOController(vm string, controller string) error {
+	d.CreateVirtIOControllerVM = vm
+	d.CreateVirtIOControllerController = vm
+	return d.CreateVirtIOControllerErr
+}
+
 func (d *DriverMock) CreateNVMeController(vm string, controller string, portcount int) error {
 	d.CreateNVMeControllerVM = vm
 	d.CreateNVMeControllerController = vm
@@ -8,6 +8,8 @@ import (

 	"github.com/hashicorp/packer-plugin-sdk/multistep"
 	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+
+	"strconv"
 )

 // This step attaches the boot ISO, cd_files iso, and guest additions to the

@@ -69,37 +71,50 @@ func (s *StepAttachISOs) Run(ctx context.Context, state multistep.StateBag) mult

 	// We have three different potential isos we can attach, so let's
 	// assign each one its own spot so they don't conflict.
-	var controllerName, device, port string
+	var controllerName string
+	var device, port int
 	switch diskCategory {
 	case "boot_iso":
 		// figure out controller path
 		controllerName = "IDE Controller"
-		port = "0"
-		device = "1"
+		port = 0
+		device = 1
 		if s.ISOInterface == "sata" {
 			controllerName = "SATA Controller"
-			port = "1"
-			device = "0"
+			port = 15
+			device = 0
+		} else if s.ISOInterface == "virtio" {
+			controllerName = "VirtIO Controller"
+			port = 15
+			device = 0
 		}
 		ui.Message("Mounting boot ISO...")
 	case "guest_additions":
 		controllerName = "IDE Controller"
-		port = "1"
-		device = "0"
+		port = 1
+		device = 0
 		if s.GuestAdditionsInterface == "sata" {
 			controllerName = "SATA Controller"
-			port = "2"
-			device = "0"
+			port = 14
+			device = 0
+		} else if s.GuestAdditionsInterface == "virtio" {
+			controllerName = "VirtIO Controller"
+			port = 14
+			device = 0
 		}
 		ui.Message("Mounting guest additions ISO...")
 	case "cd_files":
 		controllerName = "IDE Controller"
-		port = "1"
-		device = "1"
+		port = 1
+		device = 1
 		if s.ISOInterface == "sata" {
 			controllerName = "SATA Controller"
-			port = "3"
-			device = "0"
+			port = 13
+			device = 0
+		} else if s.ISOInterface == "virtio" {
+			controllerName = "VirtIO Controller"
+			port = 13
+			device = 0
 		}
 		ui.Message("Mounting cd_files ISO...")
 	}

@@ -108,8 +123,8 @@ func (s *StepAttachISOs) Run(ctx context.Context, state multistep.StateBag) mult
 	command := []string{
 		"storageattach", vmName,
 		"--storagectl", controllerName,
-		"--port", port,
-		"--device", device,
+		"--port", strconv.Itoa(port),
+		"--device", strconv.Itoa(device),
 		"--type", "dvddrive",
 		"--medium", isoPath,
 	}

@@ -125,8 +140,8 @@ func (s *StepAttachISOs) Run(ctx context.Context, state multistep.StateBag) mult
 	unmountCommand := []string{
 		"storageattach", vmName,
 		"--storagectl", controllerName,
-		"--port", port,
-		"--device", device,
+		"--port", strconv.Itoa(port),
+		"--device", strconv.Itoa(device),
 		"--type", "dvddrive",
 		"--medium", "none",
 	}
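The switch above assigns each ISO category a fixed (controller, port, device) slot per interface, with SATA and VirtIO using high port numbers so the attachments stay clear of the hard-disk ports. The same mapping, restated as a small table for readability (this helper is illustrative only, not code from the builder):

```go
package main

import "fmt"

type slot struct {
	controller   string
	port, device int
}

// isoSlots restates the values chosen in the switch above.
var isoSlots = map[string]map[string]slot{
	"boot_iso":        {"ide": {"IDE Controller", 0, 1}, "sata": {"SATA Controller", 15, 0}, "virtio": {"VirtIO Controller", 15, 0}},
	"guest_additions": {"ide": {"IDE Controller", 1, 0}, "sata": {"SATA Controller", 14, 0}, "virtio": {"VirtIO Controller", 14, 0}},
	"cd_files":        {"ide": {"IDE Controller", 1, 1}, "sata": {"SATA Controller", 13, 0}, "virtio": {"VirtIO Controller", 13, 0}},
}

func main() {
	s := isoSlots["boot_iso"]["sata"]
	fmt.Printf("--storagectl %q --port %d --device %d\n", s.controller, s.port, s.device)
}
```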
@@ -20,7 +20,7 @@ type VBoxManageConfig struct {
 	// ```json
 	// "vboxmanage": [
 	//   ["modifyvm", "{{.Name}}", "--memory", "1024"],
 	//   ["modifyvm", "{{.Name}}", "--cpus", "2"]
 	// ]
 	// ```
 	//
@@ -7,6 +7,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"regexp"

 	"github.com/hashicorp/hcl/v2/hcldec"
 	"github.com/hashicorp/packer-plugin-sdk/bootcommand"

@@ -44,9 +45,56 @@ type Config struct {
 	vboxcommon.VBoxVersionConfig    `mapstructure:",squash"`
 	vboxcommon.VBoxBundleConfig     `mapstructure:",squash"`
 	vboxcommon.GuestAdditionsConfig `mapstructure:",squash"`
+	// The chipset to be used: PIIX3 or ICH9.
+	// When set to piix3, the chipset is PIIX3. This is the default.
+	// When set to ich9, the chipset is ICH9.
+	Chipset string `mapstructure:"chipset" required:"false"`
+	// The firmware to be used: BIOS or EFI.
+	// When set to bios, the firmware is BIOS. This is the default.
+	// When set to efi, the firmware is EFI.
+	Firmware string `mapstructure:"firmware" required:"false"`
+	// Nested virtualization: false or true.
+	// When set to true, nested virtualisation (VT-x/AMD-V) is enabled.
+	// When set to false, nested virtualisation is disabled. This is the default.
+	NestedVirt bool `mapstructure:"nested_virt" required:"false"`
+	// RTC time base: UTC or local.
+	// When set to UTC, the RTC is set as UTC time.
+	// When set to local, the RTC is set as local time. This is the default.
+	RTCTimeBase string `mapstructure:"rtc_time_base" required:"false"`
+	// The size, in megabytes, of the hard disk to create for the VM. By
+	// default, this is 40000 (about 40 GB).
+	DiskSize uint `mapstructure:"disk_size" required:"false"`
+	// The NIC type to be used for the network interfaces.
+	// When set to 82540EM, the NICs are Intel PRO/1000 MT Desktop (82540EM). This is the default.
+	// When set to 82543GC, the NICs are Intel PRO/1000 T Server (82543GC).
+	// When set to 82545EM, the NICs are Intel PRO/1000 MT Server (82545EM).
+	// When set to Am79C970A, the NICs are AMD PCNet-PCI II network card (Am79C970A).
+	// When set to Am79C973, the NICs are AMD PCNet-FAST III network card (Am79C973).
+	// When set to Am79C960, the NICs are AMD PCnet-ISA/NE2100 (Am79C960).
+	// When set to virtio, the NICs are VirtIO.
+	NICType string `mapstructure:"nic_type" required:"false"`
+	// The audio controller type to be used.
+	// When set to ac97, the audio controller is ICH AC97. This is the default.
+	// When set to hda, the audio controller is Intel HD Audio.
+	// When set to sb16, the audio controller is SoundBlaster 16.
+	AudioController string `mapstructure:"audio_controller" required:"false"`
+	// The graphics controller type to be used.
+	// When set to vboxvga, the graphics controller is VirtualBox VGA. This is the default.
+	// When set to vboxsvga, the graphics controller is VirtualBox SVGA.
+	// When set to vmsvga, the graphics controller is VMware SVGA.
+	// When set to none, the graphics controller is disabled.
+	GfxController string `mapstructure:"gfx_controller" required:"false"`
+	// The VRAM size to be used. By default, this is 4 MiB.
+	GfxVramSize uint `mapstructure:"gfx_vram_size" required:"false"`
+	// 3D acceleration: true or false.
+	// When set to true, 3D acceleration is enabled.
+	// When set to false, 3D acceleration is disabled. This is the default.
+	GfxAccelerate3D bool `mapstructure:"gfx_accelerate_3d" required:"false"`
+	// Screen resolution in EFI mode: WIDTHxHEIGHT.
+	// When set to WIDTHxHEIGHT, it provides the given width and height as screen resolution
+	// to EFI, for example 1920x1080 for Full-HD resolution. By default, no screen resolution
+	// is set. Note that this option only affects EFI boot, not the (default) BIOS boot.
+	GfxEFIResolution string `mapstructure:"gfx_efi_resolution" required:"false"`
 	// The guest OS type being installed. By default this is other, but you can
 	// get dramatic performance improvements by setting this to the proper
 	// value. To view all available values for this run VBoxManage list

@@ -62,6 +110,7 @@ type Config struct {
 	// defaults to ide. When set to sata, the drive is attached to an AHCI SATA
 	// controller. When set to scsi, the drive is attached to an LsiLogic SCSI
 	// controller. When set to pcie, the drive is attached to an NVMe
+	// controller. When set to virtio, the drive is attached to a VirtIO
 	// controller. Please note that when you use "pcie", you'll need to have
 	// Virtualbox 6, install an [extension
 	// pack](https://www.virtualbox.org/wiki/Downloads#VirtualBox6.0.14OracleVMVirtualBoxExtensionPack)

@@ -98,7 +147,14 @@ type Config struct {
 	HardDriveNonrotational bool `mapstructure:"hard_drive_nonrotational" required:"false"`
 	// The type of controller that the ISO is attached to, defaults to ide.
 	// When set to sata, the drive is attached to an AHCI SATA controller.
+	// When set to virtio, the drive is attached to a VirtIO controller.
 	ISOInterface string `mapstructure:"iso_interface" required:"false"`
+	// Additional disks to create. Uses `vm_name` as the disk name template and
+	// appends `-#` where `#` is the position in the array. `#` starts at 1 since 0
+	// is the default disk. Each value represents the disk image size in MiB.
+	// Each additional disk uses the same disk parameters as the default disk.
+	// Unset by default.
+	AdditionalDiskSize []uint `mapstructure:"disk_additional_size" required:"false"`
 	// Set this to true if you would like to keep the VM registered with
 	// virtualbox. Defaults to false.
 	KeepRegistered bool `mapstructure:"keep_registered" required:"false"`

@@ -160,6 +216,39 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
 	errs = packersdk.MultiErrorAppend(errs, b.config.BootConfig.Prepare(&b.config.ctx)...)
 	errs = packersdk.MultiErrorAppend(errs, b.config.GuestAdditionsConfig.Prepare(b.config.CommConfig.Comm.Type)...)

+	if b.config.Chipset == "" {
+		b.config.Chipset = "piix3"
+	}
+	switch b.config.Chipset {
+	case "piix3", "ich9":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("chipset can only be piix3 or ich9"))
+	}
+
+	if b.config.Firmware == "" {
+		b.config.Firmware = "bios"
+	}
+	switch b.config.Firmware {
+	case "bios", "efi":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("firmware can only be bios or efi"))
+	}
+
+	if b.config.RTCTimeBase == "" {
+		b.config.RTCTimeBase = "local"
+	}
+	switch b.config.RTCTimeBase {
+	case "UTC", "local":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("rtc_time_base can only be UTC or local"))
+	}
+
 	if b.config.DiskSize == 0 {
 		b.config.DiskSize = 40000
 	}

@@ -168,6 +257,57 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
 		b.config.HardDriveInterface = "ide"
 	}

+	if b.config.NICType == "" {
+		b.config.NICType = "82540EM"
+	}
+	switch b.config.NICType {
+	case "82540EM", "82543GC", "82545EM", "Am79C970A", "Am79C973", "Am79C960", "virtio":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("NIC type can only be 82540EM, 82543GC, 82545EM, Am79C970A, Am79C973, Am79C960 or virtio"))
+	}
+
+	if b.config.GfxController == "" {
+		b.config.GfxController = "vboxvga"
+	}
+	switch b.config.GfxController {
+	case "vboxvga", "vboxsvga", "vmsvga", "none":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("Graphics controller type can only be vboxvga, vboxsvga, vmsvga, none"))
+	}
+
+	if b.config.GfxVramSize == 0 {
+		b.config.GfxVramSize = 4
+	} else {
+		if b.config.GfxVramSize < 1 || b.config.GfxVramSize > 128 {
+			errs = packersdk.MultiErrorAppend(
+				errs, errors.New("VRAM size must be from 0 (use default) to 128"))
+		}
+	}
+
+	if b.config.GfxEFIResolution != "" {
+		re := regexp.MustCompile(`^[\d]+x[\d]+$`)
+		matched := re.MatchString(b.config.GfxEFIResolution)
+		if !matched {
+			errs = packersdk.MultiErrorAppend(
+				errs, errors.New("EFI resolution must be in the format WIDTHxHEIGHT, e.g. 1920x1080"))
+		}
+	}
+
+	if b.config.AudioController == "" {
+		b.config.AudioController = "ac97"
+	}
+	switch b.config.AudioController {
+	case "ac97", "hda", "sb16":
+		// do nothing
+	default:
+		errs = packersdk.MultiErrorAppend(
+			errs, errors.New("Audio controller type can only be ac97, hda or sb16"))
+	}
+
 	if b.config.GuestOSType == "" {
 		b.config.GuestOSType = "Other"
 	}

@@ -186,11 +326,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
 	}

 	switch b.config.HardDriveInterface {
-	case "ide", "sata", "scsi", "pcie":
+	case "ide", "sata", "scsi", "pcie", "virtio":
 		// do nothing
 	default:
 		errs = packersdk.MultiErrorAppend(
-			errs, errors.New("hard_drive_interface can only be ide, sata, pcie or scsi"))
+			errs, errors.New("hard_drive_interface can only be ide, sata, pcie, scsi or virtio"))
 	}

 	if b.config.SATAPortCount == 0 {

@@ -211,9 +351,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
 			errs, errors.New("nvme_port_count cannot be greater than 255"))
 	}

-	if b.config.ISOInterface != "ide" && b.config.ISOInterface != "sata" {
+	if b.config.ISOInterface != "ide" && b.config.ISOInterface != "sata" && b.config.ISOInterface != "virtio" {
 		errs = packersdk.MultiErrorAppend(
-			errs, errors.New("iso_interface can only be ide or sata"))
+			errs, errors.New("iso_interface can only be ide, sata or virtio"))
 	}

 	// Warnings

@@ -315,7 +455,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 		},
 		&communicator.StepConnect{
 			Config:    &b.config.CommConfig.Comm,
-			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.SSHHost),
+			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.Host()),
 			SSHConfig: b.config.CommConfig.Comm.SSHConfigFunc(),
 			SSHPort:   vboxcommon.CommPort,
 			WinRMPort: vboxcommon.CommPort,
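The gfx_efi_resolution validation added above accepts only plain WIDTHxHEIGHT strings. A self-contained check using the same regular expression (the sample inputs are mine):

```go
package main

import (
	"fmt"
	"regexp"
)

// resolutionRe is the same pattern used by the validation above.
var resolutionRe = regexp.MustCompile(`^[\d]+x[\d]+$`)

func main() {
	for _, candidate := range []string{"1920x1080", "1280x800", "wide", "1920×1080"} {
		fmt.Println(candidate, resolutionRe.MatchString(candidate))
	}
	// 1920x1080 and 1280x800 are accepted; "wide" is rejected, and so is
	// "1920×1080" because the Unicode multiplication sign is not a plain "x".
}
```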
@@ -117,7 +117,17 @@ type FlatConfig struct {
 	GuestAdditionsPath   *string `mapstructure:"guest_additions_path" cty:"guest_additions_path" hcl:"guest_additions_path"`
 	GuestAdditionsSHA256 *string `mapstructure:"guest_additions_sha256" cty:"guest_additions_sha256" hcl:"guest_additions_sha256"`
 	GuestAdditionsURL    *string `mapstructure:"guest_additions_url" required:"false" cty:"guest_additions_url" hcl:"guest_additions_url"`
+	Chipset              *string `mapstructure:"chipset" required:"false" cty:"chipset" hcl:"chipset"`
+	Firmware             *string `mapstructure:"firmware" required:"false" cty:"firmware" hcl:"firmware"`
+	NestedVirt           *bool   `mapstructure:"nested_virt" required:"false" cty:"nested_virt" hcl:"nested_virt"`
+	RTCTimeBase          *string `mapstructure:"rtc_time_base" required:"false" cty:"rtc_time_base" hcl:"rtc_time_base"`
+	DiskSize             *uint   `mapstructure:"disk_size" required:"false" cty:"disk_size" hcl:"disk_size"`
+	NICType              *string `mapstructure:"nic_type" required:"false" cty:"nic_type" hcl:"nic_type"`
+	AudioController      *string `mapstructure:"audio_controller" required:"false" cty:"audio_controller" hcl:"audio_controller"`
+	GfxController        *string `mapstructure:"gfx_controller" required:"false" cty:"gfx_controller" hcl:"gfx_controller"`
+	GfxVramSize          *uint   `mapstructure:"gfx_vram_size" required:"false" cty:"gfx_vram_size" hcl:"gfx_vram_size"`
+	GfxAccelerate3D      *bool   `mapstructure:"gfx_accelerate_3d" required:"false" cty:"gfx_accelerate_3d" hcl:"gfx_accelerate_3d"`
+	GfxEFIResolution     *string `mapstructure:"gfx_efi_resolution" required:"false" cty:"gfx_efi_resolution" hcl:"gfx_efi_resolution"`
 	GuestOSType          *string `mapstructure:"guest_os_type" required:"false" cty:"guest_os_type" hcl:"guest_os_type"`
 	HardDriveDiscard     *bool   `mapstructure:"hard_drive_discard" required:"false" cty:"hard_drive_discard" hcl:"hard_drive_discard"`
 	HardDriveInterface   *string `mapstructure:"hard_drive_interface" required:"false" cty:"hard_drive_interface" hcl:"hard_drive_interface"`

@@ -125,6 +135,7 @@ type FlatConfig struct {
 	NVMePortCount          *int    `mapstructure:"nvme_port_count" required:"false" cty:"nvme_port_count" hcl:"nvme_port_count"`
 	HardDriveNonrotational *bool   `mapstructure:"hard_drive_nonrotational" required:"false" cty:"hard_drive_nonrotational" hcl:"hard_drive_nonrotational"`
 	ISOInterface           *string `mapstructure:"iso_interface" required:"false" cty:"iso_interface" hcl:"iso_interface"`
+	AdditionalDiskSize     []uint  `mapstructure:"disk_additional_size" required:"false" cty:"disk_additional_size" hcl:"disk_additional_size"`
 	KeepRegistered         *bool   `mapstructure:"keep_registered" required:"false" cty:"keep_registered" hcl:"keep_registered"`
 	SkipExport             *bool   `mapstructure:"skip_export" required:"false" cty:"skip_export" hcl:"skip_export"`
 	VMName                 *string `mapstructure:"vm_name" required:"false" cty:"vm_name" hcl:"vm_name"`

@@ -249,7 +260,17 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
 	"guest_additions_path":   &hcldec.AttrSpec{Name: "guest_additions_path", Type: cty.String, Required: false},
 	"guest_additions_sha256": &hcldec.AttrSpec{Name: "guest_additions_sha256", Type: cty.String, Required: false},
 	"guest_additions_url":    &hcldec.AttrSpec{Name: "guest_additions_url", Type: cty.String, Required: false},
+	"chipset":                &hcldec.AttrSpec{Name: "chipset", Type: cty.String, Required: false},
+	"firmware":               &hcldec.AttrSpec{Name: "firmware", Type: cty.String, Required: false},
+	"nested_virt":            &hcldec.AttrSpec{Name: "nested_virt", Type: cty.Bool, Required: false},
+	"rtc_time_base":          &hcldec.AttrSpec{Name: "rtc_time_base", Type: cty.String, Required: false},
+	"disk_size":              &hcldec.AttrSpec{Name: "disk_size", Type: cty.Number, Required: false},
+	"nic_type":               &hcldec.AttrSpec{Name: "nic_type", Type: cty.String, Required: false},
+	"audio_controller":       &hcldec.AttrSpec{Name: "audio_controller", Type: cty.String, Required: false},
+	"gfx_controller":         &hcldec.AttrSpec{Name: "gfx_controller", Type: cty.String, Required: false},
+	"gfx_vram_size":          &hcldec.AttrSpec{Name: "gfx_vram_size", Type: cty.Number, Required: false},
+	"gfx_accelerate_3d":      &hcldec.AttrSpec{Name: "gfx_accelerate_3d", Type: cty.Bool, Required: false},
+	"gfx_efi_resolution":     &hcldec.AttrSpec{Name: "gfx_efi_resolution", Type: cty.String, Required: false},
 	"guest_os_type":          &hcldec.AttrSpec{Name: "guest_os_type", Type: cty.String, Required: false},
 	"hard_drive_discard":     &hcldec.AttrSpec{Name: "hard_drive_discard", Type: cty.Bool, Required: false},
 	"hard_drive_interface":   &hcldec.AttrSpec{Name: "hard_drive_interface", Type: cty.String, Required: false},

@@ -257,6 +278,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
 	"nvme_port_count":          &hcldec.AttrSpec{Name: "nvme_port_count", Type: cty.Number, Required: false},
 	"hard_drive_nonrotational": &hcldec.AttrSpec{Name: "hard_drive_nonrotational", Type: cty.Bool, Required: false},
 	"iso_interface":            &hcldec.AttrSpec{Name: "iso_interface", Type: cty.String, Required: false},
+	"disk_additional_size":     &hcldec.AttrSpec{Name: "disk_additional_size", Type: cty.List(cty.Number), Required: false},
 	"keep_registered":          &hcldec.AttrSpec{Name: "keep_registered", Type: cty.Bool, Required: false},
 	"skip_export":              &hcldec.AttrSpec{Name: "skip_export", Type: cty.Bool, Required: false},
 	"vm_name":                  &hcldec.AttrSpec{Name: "vm_name", Type: cty.String, Required: false},
@@ -22,31 +22,50 @@ func (s *stepCreateDisk) Run(ctx context.Context, state multistep.StateBag) mult
 	driver := state.Get("driver").(vboxcommon.Driver)
 	ui := state.Get("ui").(packersdk.Ui)
 	vmName := state.Get("vmName").(string)

 	format := "VDI"
-	path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, strings.ToLower(format)))
-
-	command := []string{
-		"createhd",
-		"--filename", path,
-		"--size", strconv.FormatUint(uint64(config.DiskSize), 10),
-		"--format", format,
-		"--variant", "Standard",
+	// The main disk and additional disks
+	diskFullPaths := []string{}
+	diskSizes := []uint{config.DiskSize}
+	if len(config.AdditionalDiskSize) == 0 {
+		// If there are no additional disks, use disk naming as before
+		diskFullPaths = append(diskFullPaths, filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, strings.ToLower(format))))
+	} else {
+		// If there are additional disks, use consistent naming with numbers
+		diskFullPaths = append(diskFullPaths, filepath.Join(config.OutputDir, fmt.Sprintf("%s-0.%s", config.VMName, strings.ToLower(format))))
+
+		for i, diskSize := range config.AdditionalDiskSize {
+			path := filepath.Join(config.OutputDir, fmt.Sprintf("%s-%d.%s", config.VMName, i+1, strings.ToLower(format)))
+			diskFullPaths = append(diskFullPaths, path)
+			diskSizes = append(diskSizes, diskSize)
+		}
+	}

-	ui.Say("Creating hard drive...")
-	err := driver.VBoxManage(command...)
-	if err != nil {
-		err := fmt.Errorf("Error creating hard drive: %s", err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
+	// Create all required disks
+	for i := range diskFullPaths {
+		ui.Say(fmt.Sprintf("Creating hard drive %s with size %d MiB...", diskFullPaths[i], diskSizes[i]))
+
+		command := []string{
+			"createhd",
+			"--filename", diskFullPaths[i],
+			"--size", strconv.FormatUint(uint64(diskSizes[i]), 10),
+			"--format", format,
+			"--variant", "Standard",
+		}
+
+		err := driver.VBoxManage(command...)
+		if err != nil {
+			err := fmt.Errorf("Error creating hard drive: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
 	}

 	// Add the IDE controller so we can later attach the disk.
 	// When the hard disk controller is not IDE, this device is still used
 	// by VirtualBox to deliver the guest extensions.
-	err = driver.VBoxManage("storagectl", vmName, "--name", "IDE Controller", "--add", "ide")
+	err := driver.VBoxManage("storagectl", vmName, "--name", "IDE Controller", "--add", "ide")
 	if err != nil {
 		err := fmt.Errorf("Error creating disk controller: %s", err)
 		state.Put("error", err)

@@ -66,6 +85,18 @@ func (s *stepCreateDisk) Run(ctx context.Context, state multistep.StateBag) mult
 		}
 	}

+	// Add a VirtIO controller if we were asked to use VirtIO. We still attach
+	// the IDE controller above because some other things (the attached ISOs)
+	// require it.
+	if config.HardDriveInterface == "virtio" || config.ISOInterface == "virtio" {
+		if err := driver.CreateVirtIOController(vmName, "VirtIO Controller"); err != nil {
+			err := fmt.Errorf("Error creating disk controller: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+	}
+
 	if config.HardDriveInterface == "scsi" {
 		if err := driver.CreateSCSIController(vmName, "SCSI Controller"); err != nil {
 			err := fmt.Errorf("Error creating disk controller: %s", err)

@@ -86,13 +117,11 @@ func (s *stepCreateDisk) Run(ctx context.Context, state multistep.StateBag) mult
 	controllerName := "IDE Controller"
 	if config.HardDriveInterface == "sata" {
 		controllerName = "SATA Controller"
-	}
-
-	if config.HardDriveInterface == "scsi" {
+	} else if config.HardDriveInterface == "scsi" {
 		controllerName = "SCSI Controller"
-	}
-
-	if config.HardDriveInterface == "pcie" {
+	} else if config.HardDriveInterface == "virtio" {
+		controllerName = "VirtIO Controller"
+	} else if config.HardDriveInterface == "pcie" {
 		controllerName = "NVMe Controller"
 	}

@@ -106,21 +135,23 @@ func (s *stepCreateDisk) Run(ctx context.Context, state multistep.StateBag) mult
 		discard = "on"
 	}

-	command = []string{
-		"storageattach", vmName,
-		"--storagectl", controllerName,
-		"--port", "0",
-		"--device", "0",
-		"--type", "hdd",
-		"--medium", path,
-		"--nonrotational", nonrotational,
-		"--discard", discard,
-	}
-	if err := driver.VBoxManage(command...); err != nil {
-		err := fmt.Errorf("Error attaching hard drive: %s", err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
+	for i := range diskFullPaths {
+		command := []string{
+			"storageattach", vmName,
+			"--storagectl", controllerName,
+			"--port", strconv.FormatUint(uint64(i), 10),
+			"--device", "0",
+			"--type", "hdd",
+			"--medium", diskFullPaths[i],
+			"--nonrotational", nonrotational,
+			"--discard", discard,
+		}
+		if err := driver.VBoxManage(command...); err != nil {
+			err := fmt.Errorf("Error attaching hard drive: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
 	}

 	return multistep.ActionContinue
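The disk handling above keeps the historical single-disk name when no additional disks are requested and switches to a numbered scheme otherwise. Just the naming logic, pulled out as a sketch (the function and sample values are mine, not builder code):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// diskPaths mirrors the naming above: "<vm>.vdi" for a single disk,
// "<vm>-0.vdi", "<vm>-1.vdi", ... when disk_additional_size is set.
func diskPaths(outputDir, vmName string, additional int) []string {
	if additional == 0 {
		return []string{filepath.Join(outputDir, vmName+".vdi")}
	}
	paths := []string{filepath.Join(outputDir, fmt.Sprintf("%s-0.vdi", vmName))}
	for i := 1; i <= additional; i++ {
		paths = append(paths, filepath.Join(outputDir, fmt.Sprintf("%s-%d.vdi", vmName, i)))
	}
	return paths
}

func main() {
	fmt.Println(diskPaths("output-virtualbox-iso", "packer-example", 2))
	// e.g. [output-virtualbox-iso/packer-example-0.vdi ... packer-example-2.vdi]
}
```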
@@ -26,7 +26,7 @@ func (s *stepCreateVM) Run(ctx context.Context, state multistep.StateBag) multis

 	name := config.VMName

-	commands := make([][]string, 6)
+	commands := make([][]string, 14)
 	commands[0] = []string{
 		"createvm", "--name", name,
 		"--ostype", config.GuestOSType, "--register",

@@ -40,9 +40,44 @@ func (s *stepCreateVM) Run(ctx context.Context, state multistep.StateBag) multis
 	commands[4] = []string{"modifyvm", name, "--usb", map[bool]string{true: "on", false: "off"}[config.HWConfig.USB]}

 	if strings.ToLower(config.HWConfig.Sound) == "none" {
-		commands[5] = []string{"modifyvm", name, "--audio", config.HWConfig.Sound}
+		commands[5] = []string{"modifyvm", name, "--audio", config.HWConfig.Sound,
+			"--audiocontroller", config.AudioController}
 	} else {
-		commands[5] = []string{"modifyvm", name, "--audio", config.HWConfig.Sound, "--audioin", "on", "--audioout", "on"}
+		commands[5] = []string{"modifyvm", name, "--audio", config.HWConfig.Sound, "--audioin", "on", "--audioout", "on",
+			"--audiocontroller", config.AudioController}
 	}

+	commands[6] = []string{"modifyvm", name, "--chipset", config.Chipset}
+	commands[7] = []string{"modifyvm", name, "--firmware", config.Firmware}
+	// Set the configured NIC type for all 8 possible NICs
+	commands[8] = []string{"modifyvm", name,
+		"--nictype1", config.NICType,
+		"--nictype2", config.NICType,
+		"--nictype3", config.NICType,
+		"--nictype4", config.NICType,
+		"--nictype5", config.NICType,
+		"--nictype6", config.NICType,
+		"--nictype7", config.NICType,
+		"--nictype8", config.NICType}
+	commands[9] = []string{"modifyvm", name, "--graphicscontroller", config.GfxController, "--vram", strconv.FormatUint(uint64(config.GfxVramSize), 10)}
+	if config.RTCTimeBase == "UTC" {
+		commands[10] = []string{"modifyvm", name, "--rtcuseutc", "on"}
+	} else {
+		commands[10] = []string{"modifyvm", name, "--rtcuseutc", "off"}
+	}
+	if config.NestedVirt == true {
+		commands[11] = []string{"modifyvm", name, "--nested-hw-virt", "on"}
+	} else {
+		commands[11] = []string{"modifyvm", name, "--nested-hw-virt", "off"}
+	}
+
+	if config.GfxAccelerate3D {
+		commands[12] = []string{"modifyvm", name, "--accelerate3d", "on"}
+	} else {
+		commands[12] = []string{"modifyvm", name, "--accelerate3d", "off"}
+	}
+	if config.GfxEFIResolution != "" {
+		commands[13] = []string{"setextradata", name, "VBoxInternal2/EfiGraphicsResolution", config.GfxEFIResolution}
+	}
+
 	ui.Say("Creating virtual machine...")
@@ -130,7 +130,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 		},
 		&communicator.StepConnect{
 			Config:    &b.config.CommConfig.Comm,
-			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.SSHHost),
+			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.Host()),
 			SSHConfig: b.config.CommConfig.Comm.SSHConfigFunc(),
 			SSHPort:   vboxcommon.CommPort,
 			WinRMPort: vboxcommon.CommPort,

@@ -114,7 +114,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 		},
 		&communicator.StepConnect{
 			Config:    &b.config.CommConfig.Comm,
-			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.SSHHost),
+			Host:      vboxcommon.CommHost(b.config.CommConfig.Comm.Host()),
 			SSHConfig: b.config.CommConfig.Comm.SSHConfigFunc(),
 			SSHPort:   vboxcommon.CommPort,
 			WinRMPort: vboxcommon.CommPort,
@@ -23,7 +23,7 @@ type Driver interface {
 	// Clone clones the VMX and the disk to the destination path. The
 	// destination is a path to the VMX file. The disk will be copied
 	// to that same directory.
-	Clone(dst string, src string, cloneType bool) error
+	Clone(dst string, src string, cloneType bool, snapshot string) error

 	// CompactDisk compacts a virtual disk.
 	CompactDisk(string) error

@@ -112,7 +112,7 @@ func NewESX5Driver(dconfig *DriverConfig, config *SSHConfig, vmName string) (Dri
 	}, nil
 }

-func (d *ESX5Driver) Clone(dst, src string, linked bool) error {
+func (d *ESX5Driver) Clone(dst, src string, linked bool, snapshot string) error {

 	linesToArray := func(lines string) []string { return strings.Split(strings.Trim(lines, "\r\n"), "\n") }

@@ -31,7 +31,7 @@ func NewFusion5Driver(dconfig *DriverConfig, config *SSHConfig) Driver {
 	}
 }

-func (d *Fusion5Driver) Clone(dst, src string, linked bool) error {
+func (d *Fusion5Driver) Clone(dst, src string, linked bool, snapshot string) error {
 	return errors.New("Cloning is not supported with Fusion 5. Please use Fusion 6+.")
 }

@@ -27,7 +27,7 @@ func NewFusion6Driver(dconfig *DriverConfig, config *SSHConfig) Driver {
 	}
 }

-func (d *Fusion6Driver) Clone(dst, src string, linked bool) error {
+func (d *Fusion6Driver) Clone(dst, src string, linked bool, snapshot string) error {

 	var cloneType string
 	if linked {

@@ -36,10 +36,11 @@ func (d *Fusion6Driver) Clone(dst, src string, linked bool) error {
 		cloneType = "full"
 	}

-	cmd := exec.Command(d.vmrunPath(),
-		"-T", "fusion",
-		"clone", src, dst,
-		cloneType)
+	args := []string{"-T", "fusion", "clone", src, dst, cloneType}
+	if snapshot != "" {
+		args = append(args, "-snapshot", snapshot)
+	}
+	cmd := exec.Command(d.vmrunPath(), args...)
 	if _, _, err := runAndLog(cmd); err != nil {
 		if strings.Contains(err.Error(), "parameters was invalid") {
 			return fmt.Errorf(

@@ -14,6 +14,7 @@ type DriverMock struct {
 	CloneDst string
 	CloneSrc string
 	Linked   bool
+	Snapshot string
 	CloneErr error

 	CompactDiskCalled bool

@@ -113,11 +114,12 @@ func (m NetworkMapperMock) DeviceIntoName(device string) (string, error) {
 	return "", nil
 }

-func (d *DriverMock) Clone(dst string, src string, linked bool) error {
+func (d *DriverMock) Clone(dst string, src string, linked bool, snapshot string) error {
 	d.CloneCalled = true
 	d.CloneDst = dst
 	d.CloneSrc = src
 	d.Linked = linked
+	d.Snapshot = snapshot
 	return d.CloneErr
 }
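Every vmrun-based driver above now builds its clone argument list the same way, appending `-snapshot <name>` only when one was configured. The assembly step on its own (host type and paths are placeholder values, not taken from the drivers):

```go
package main

import "fmt"

// cloneArgs mirrors the argument construction used by the vmrun drivers above.
func cloneArgs(hostType, src, dst, cloneType, snapshot string) []string {
	args := []string{"-T", hostType, "clone", src, dst, cloneType}
	if snapshot != "" {
		args = append(args, "-snapshot", snapshot)
	}
	return args
}

func main() {
	fmt.Println(cloneArgs("ws", "source.vmx", "clone.vmx", "linked", ""))
	fmt.Println(cloneArgs("ws", "source.vmx", "clone.vmx", "full", "base-snapshot"))
}
```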
@@ -31,7 +31,7 @@ func NewPlayer5Driver(config *SSHConfig) Driver {
 	}
 }

-func (d *Player5Driver) Clone(dst, src string, linked bool) error {
+func (d *Player5Driver) Clone(dst, src string, linked bool, snapshot string) error {
 	return errors.New("Cloning is not supported with VMWare Player version 5. Please use VMWare Player version 6, or greater.")
 }

@@ -204,12 +204,32 @@ func (d *Player5Driver) Verify() error {

 	d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) {
 		pathNetmap := playerNetmapConfPath()
-		if _, err := os.Stat(pathNetmap); err != nil {
-			return nil, fmt.Errorf("Could not find netmap conf file: %s", pathNetmap)
-		}
-		log.Printf("Located networkmapper configuration file using Player: %s", pathNetmap)

-		return ReadNetmapConfig(pathNetmap)
+		// If we were able to find the file (no error), then we can proceed with reading
+		// the networkmapper configuration.
+		if _, err := os.Stat(pathNetmap); err == nil {
+			log.Printf("Located networkmapper configuration file using Player: %s", pathNetmap)
+			return ReadNetmapConfig(pathNetmap)
+		}
+
+		// If we weren't able to find the networkmapper configuration file, then fall back
+		// to the networking file which might also be in the configuration directory.
+		libpath, _ := playerVMwareRoot()
+		pathNetworking := filepath.Join(libpath, "networking")
+		if _, err := os.Stat(pathNetworking); err != nil {
+			return nil, fmt.Errorf("Could not determine network mappings from files in path: %s", libpath)
+		}
+
+		// We were able to successfully stat the file.. So, now we can open a handle to it.
+		log.Printf("Located networking configuration file using Player: %s", pathNetworking)
+		fd, err := os.Open(pathNetworking)
+		if err != nil {
+			return nil, err
+		}
+		defer fd.Close()
+
+		// Then we pass the handle to the networking configuration parser.
+		return ReadNetworkingConfig(fd)
 	}
 	return nil
 }
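The Player (and, below, Workstation) network-mapper lookup now probes the netmap configuration first and falls back to the networking file. The probe order in isolation (the example paths are illustrative, not taken from the driver):

```go
package main

import (
	"fmt"
	"os"
)

// firstExisting returns the first path that can be stat'ed, mirroring the
// "netmap.conf first, then the networking file" fallback used above.
func firstExisting(paths ...string) (string, error) {
	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return p, nil
		}
	}
	return "", fmt.Errorf("none of %v exist", paths)
}

func main() {
	p, err := firstExisting("/etc/vmware/netmap.conf", "/etc/vmware/networking")
	fmt.Println(p, err)
}
```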
@@ -21,7 +21,7 @@ func NewPlayer6Driver(config *SSHConfig) Driver {
 	}
 }

-func (d *Player6Driver) Clone(dst, src string, linked bool) error {
+func (d *Player6Driver) Clone(dst, src string, linked bool, snapshot string) error {
 	// TODO(rasa) check if running player+, not just player

 	var cloneType string

@@ -31,11 +31,11 @@ func (d *Player6Driver) Clone(dst, src string, linked bool) error {
 		cloneType = "full"
 	}

-	cmd := exec.Command(d.Player5Driver.VmrunPath,
-		"-T", "ws",
-		"clone", src, dst,
-		cloneType)
-
+	args := []string{"-T", "ws", "clone", src, dst, cloneType}
+	if snapshot != "" {
+		args = append(args, "-snapshot", snapshot)
+	}
+	cmd := exec.Command(d.Player5Driver.VmrunPath, args...)
 	if _, _, err := runAndLog(cmd); err != nil {
 		return err
 	}
@@ -21,7 +21,7 @@ func NewWorkstation10Driver(config *SSHConfig) Driver {
 	}
 }

-func (d *Workstation10Driver) Clone(dst, src string, linked bool) error {
+func (d *Workstation10Driver) Clone(dst, src string, linked bool, snapshot string) error {

 	var cloneType string
 	if linked {

@@ -30,11 +30,11 @@ func (d *Workstation10Driver) Clone(dst, src string, linked bool) error {
 		cloneType = "full"
 	}

-	cmd := exec.Command(d.Workstation9Driver.VmrunPath,
-		"-T", "ws",
-		"clone", src, dst,
-		cloneType)
-
+	args := []string{"-T", "ws", "clone", src, dst, cloneType}
+	if snapshot != "" {
+		args = append(args, "-snapshot", snapshot)
+	}
+	cmd := exec.Command(d.Workstation9Driver.VmrunPath, args...)
 	if _, _, err := runAndLog(cmd); err != nil {
 		return err
 	}

@@ -30,7 +30,7 @@ func NewWorkstation9Driver(config *SSHConfig) Driver {
 	}
 }

-func (d *Workstation9Driver) Clone(dst, src string, linked bool) error {
+func (d *Workstation9Driver) Clone(dst, src string, linked bool, snapshot string) error {
 	return errors.New("Cloning is not supported with VMware WS version 9. Please use VMware WS version 10, or greater.")
 }

@@ -165,12 +165,32 @@ func (d *Workstation9Driver) Verify() error {

 	d.VmwareDriver.NetworkMapper = func() (NetworkNameMapper, error) {
 		pathNetmap := workstationNetmapConfPath()
-		if _, err := os.Stat(pathNetmap); err != nil {
-			return nil, fmt.Errorf("Could not find netmap conf file: %s", pathNetmap)
-		}
-		log.Printf("Located networkmapper configuration file using Workstation: %s", pathNetmap)

-		return ReadNetmapConfig(pathNetmap)
+		// Check that the file for the networkmapper configuration exists. If there's no
+		// error, then the file exists and we can proceed to read the configuration out of it.
+		if _, err := os.Stat(pathNetmap); err == nil {
+			log.Printf("Located networkmapper configuration file using Workstation: %s", pathNetmap)
+			return ReadNetmapConfig(pathNetmap)
+		}
+
+		// If we weren't able to find the networkmapper configuration file, then fall back
+		// to the networking file which might also be in the configuration directory.
+		libpath, _ := workstationVMwareRoot()
+		pathNetworking := filepath.Join(libpath, "networking")
+		if _, err := os.Stat(pathNetworking); err != nil {
+			return nil, fmt.Errorf("Could not determine network mappings from files in path: %s", libpath)
+		}
+
+		// We were able to successfully stat the file.. So, now we can open a handle to it.
+		log.Printf("Located networking configuration file using Workstation: %s", pathNetworking)
+		fd, err := os.Open(pathNetworking)
+		if err != nil {
+			return nil, err
+		}
+		defer fd.Close()
+
+		// Then we pass the handle to the networking configuration parser.
+		return ReadNetworkingConfig(fd)
 	}
 	return nil
 }
@@ -97,6 +97,7 @@ func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook)
 			OutputDir: &b.config.OutputDir,
 			VMName:    b.config.VMName,
 			Linked:    b.config.Linked,
+			Snapshot:  b.config.AttachSnapshot,
 		},
 		&vmwcommon.StepConfigureVMX{
 			CustomData: b.config.VMXData,

@@ -47,6 +47,11 @@ type Config struct {
 	// scenarios. Most users will wish to create a full clone instead. Defaults
 	// to false.
 	Linked bool `mapstructure:"linked" required:"false"`
+	// Defaults to `null/empty`. The name of an
+	// **existing** snapshot to which the builder shall attach the VM before
+	// starting it. If no snapshot is specified the builder will simply start the
+	// VM from its current state, i.e. its current snapshot.
+	AttachSnapshot string `mapstructure:"attach_snapshot" required:"false"`
 	// Path to the source VMX file to clone. If
 	// remote_type is enabled then this specifies a path on the remote_host.
 	SourcePath string `mapstructure:"source_path" required:"true"`

@@ -123,6 +123,7 @@ type FlatConfig struct {
 	DiskName   *string `mapstructure:"vmdk_name" required:"false" cty:"vmdk_name" hcl:"vmdk_name"`
 	DiskTypeId *string `mapstructure:"disk_type_id" required:"false" cty:"disk_type_id" hcl:"disk_type_id"`
 	Linked     *bool   `mapstructure:"linked" required:"false" cty:"linked" hcl:"linked"`
+	AttachSnapshot *string `mapstructure:"attach_snapshot" required:"false" cty:"attach_snapshot" hcl:"attach_snapshot"`
 	SourcePath *string `mapstructure:"source_path" required:"true" cty:"source_path" hcl:"source_path"`
 	VMName     *string `mapstructure:"vm_name" required:"false" cty:"vm_name" hcl:"vm_name"`
 }

@@ -252,6 +253,7 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
 	"vmdk_name":    &hcldec.AttrSpec{Name: "vmdk_name", Type: cty.String, Required: false},
 	"disk_type_id": &hcldec.AttrSpec{Name: "disk_type_id", Type: cty.String, Required: false},
 	"linked":       &hcldec.AttrSpec{Name: "linked", Type: cty.Bool, Required: false},
+	"attach_snapshot": &hcldec.AttrSpec{Name: "attach_snapshot", Type: cty.String, Required: false},
 	"source_path": &hcldec.AttrSpec{Name: "source_path", Type: cty.String, Required: false},
 	"vm_name":     &hcldec.AttrSpec{Name: "vm_name", Type: cty.String, Required: false},
 }

@@ -20,6 +20,7 @@ type StepCloneVMX struct {
 	Path     string
 	VMName   string
 	Linked   bool
+	Snapshot string
 	tempDir  string
 }

@@ -38,7 +39,7 @@ func (s *StepCloneVMX) Run(ctx context.Context, state multistep.StateBag) multis
 	log.Printf("Cloning from: %s", s.Path)
 	log.Printf("Cloning to: %s", vmxPath)

-	if err := driver.Clone(vmxPath, s.Path, s.Linked); err != nil {
+	if err := driver.Clone(vmxPath, s.Path, s.Linked, s.Snapshot); err != nil {
 		return halt(err)
 	}
@@ -3,7 +3,7 @@
 package common

 import (
-	"os"
+	"io/fs"

 	"github.com/hashicorp/hcl/v2/hcldec"
 	"github.com/zclconf/go-cty/cty"

@@ -13,7 +13,7 @@ import (
 // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
 type FlatOutputConfig struct {
 	OutputDir *string `mapstructure:"output_directory" required:"false" cty:"output_directory" hcl:"output_directory"`
-	DirPerm   *os.FileMode `mapstructure:"directory_permission" required:"false" cty:"directory_permission" hcl:"directory_permission"`
+	DirPerm   *fs.FileMode `mapstructure:"directory_permission" required:"false" cty:"directory_permission" hcl:"directory_permission"`
 }

 // FlatMapstructure returns a new FlatOutputConfig.

@@ -3,7 +3,7 @@
 package common

 import (
-	"os"
+	"io/fs"

 	"github.com/hashicorp/hcl/v2/hcldec"
 	"github.com/zclconf/go-cty/cty"

@@ -17,7 +17,7 @@ type FlatExportConfig struct {
 	Images    *bool   `mapstructure:"images" cty:"images" hcl:"images"`
 	Manifest  *string `mapstructure:"manifest" cty:"manifest" hcl:"manifest"`
 	OutputDir *string `mapstructure:"output_directory" required:"false" cty:"output_directory" hcl:"output_directory"`
-	DirPerm   *os.FileMode `mapstructure:"directory_permission" required:"false" cty:"directory_permission" hcl:"directory_permission"`
+	DirPerm   *fs.FileMode `mapstructure:"directory_permission" required:"false" cty:"directory_permission" hcl:"directory_permission"`
 	Options   []string `mapstructure:"options" cty:"options" hcl:"options"`
 }
@@ -109,7 +109,7 @@ func checkPluginName(name string) error {
 		strings.HasPrefix(name, "packer-post-processor-") {
 		fmt.Printf("\n[WARNING] Plugin is named with old prefix `packer-[builder|provisioner|post-processor]-{name})`. " +
 			"These will be detected but Packer cannot install them automatically. " +
-			"The plugin must be a multi-plugin named packer-plugin-{name} to be installable through the `packer init` command.\n")
+			"The plugin must be a multi-component plugin named packer-plugin-{name} to be installable through the `packer init` command.\n")
 		return nil
 	}
 	return fmt.Errorf("plugin's name is not valid")

@@ -126,7 +126,11 @@ func (m *Meta) GetConfigFromJSON(cla *MetaArgs) (packer.Handler, int) {
 	}

 	if err != nil {
-		m.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
+		m.Ui.Error(fmt.Sprintf("Failed to parse file as legacy JSON template: "+
+			"if you are using an HCL template, check your file extensions; they "+
+			"should be either *.pkr.hcl or *.pkr.json; see the docs for more "+
+			"details: https://www.packer.io/docs/templates/hcl_templates. \n"+
+			"Original error: %s", err))
 		return nil, 1
 	}

@@ -64,7 +64,8 @@ func TestBuildCommand_RunContext_CtxCancel(t *testing.T) {
 		defer close(codeC)
 		cfg, ret := c.ParseArgs(tt.args)
 		if ret != 0 {
-			t.Fatal("ParseArgs failed.")
+			t.Error("ParseArgs failed.")
+			return
 		}
 		codeC <- c.RunContext(ctx, cfg)
 	}()
Some files were not shown because too many files have changed in this diff.