Compare commits: moss_debug ... master

34 commits:

e1f70147d9, 6e037aa84b, 4ab255caba, 2b26376180, 36a7b28b5f, 4be9dfd183,
98990fc34e, 21833e7324, 3e3e013be5, 940180fb5b, 1aa7a1b3f8, 86007b04aa,
488e6d80aa, 0a05b834d7, f80da79b85, f3f58b1c39, 8eb85ac0e5, 444605c127,
e3010fa817, 638be57e43, 69572b442a, f07813e14d, 5a14b11f2e, fe6077fc85,
df89b4b52c, 70739cf1a1, ca513aa028, 57639e8330, 41c0668d6e, b6a9da11a2,
a3daf4c686, d9b874f118, 1d4a8e7ba2, 6867456a72
.github/labeler-issue-triage.yml (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
bug:
  - 'panic:'
crash:
  - 'panic:'
.github/workflows/issue-comment-created.yml (vendored, new file, 17 lines)

@@ -0,0 +1,17 @@
name: Issue Comment Created Triage

on:
  issue_comment:
    types: [created]

jobs:
  issue_comment_triage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-ecosystem/action-remove-labels@v1
        with:
          github_token: "${{ secrets.GITHUB_TOKEN }}"
          labels: |
            stale
            waiting-reply
.github/workflows/issues-opened.yml (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
name: Issue Opened Triage

on:
  issues:
    types: [opened]

jobs:
  issue_triage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: github/issue-labeler@v2
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          configuration-path: .github/labeler-issue-triage.yml
.github/workflows/lock.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name: 'Lock Threads'

on:
  schedule:
    - cron: '50 1 * * *'

# Only 50 issues will be handled during a given run.
jobs:
  lock:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/lock-threads@v2
        with:
          github-token: ${{ github.token }}
          issue-lock-comment: >
            I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

            If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
          issue-lock-inactive-days: '30'
          # Issues older than 180 days ago should be ignored
          issue-exclude-created-before: '2020-11-01'
          pr-lock-comment: >
            I'm going to lock this pull request because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

            If you have found a problem that seems related to this change, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
          pr-lock-inactive-days: '30'
          # Issues older than 180 days ago should be ignored
          pr-exclude-created-before: '2020-11-01'
@@ -1,28 +1,3 @@

behavior "regexp_issue_labeler" "panic_label" {
  regexp = "panic:"
  labels = ["crash", "bug"]
}

behavior "remove_labels_on_reply" "remove_stale" {
  labels = ["waiting-reply", "stale"]
  only_non_maintainers = true
}

poll "closed_issue_locker" "locker" {
  schedule = "0 50 1 * * *"
  closed_for = "720h" # 30 days
  max_issues = 500
  sleep_between_issues = "5s"
  no_comment_if_no_activity_for = "4320h" # 180 days

  message = <<-EOF
    I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

    If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
  EOF
}

poll "label_issue_migrater" "remote_plugin_migrater" {
  schedule = "0 20 * * * *"
  new_owner = "hashicorp"
CHANGELOG.md (95 changed lines)

@@ -2,45 +2,46 @@

### IMPROVEMENTS:

Major refactor: Extracted a majority of HashiCorp-maintained and community plugins from the Packer Core repository. They now live in their own multi-component plugin repositories. The following repositories have been created, and their components have been deleted from the "github.com/hashicorp/packer" repository.

* "github.com/hashicorp/packer-plugin-docker"
* "github.com/hashicorp/packer-plugin-amazon"
* "github.com/hashicorp/packer-plugin-ansible"
* "github.com/hashicorp/packer-plugin-alicloud" [GH-10932]
* "github.com/hashicorp/packer-plugin-amazon" [GH-10800]
* "github.com/hashicorp/packer-plugin-ansible" [GH-10912]
* "github.com/hashicorp/packer-plugin-azure" [GH-10979]
* "github.com/hashicorp/packer-plugin-chef" [GH-10921]
* "github.com/hashicorp/packer-plugin-cloudstack" [GH-10934]
* "github.com/hashicorp/packer-plugin-converge" [GH-10956]
* "github.com/hashicorp/packer-plugin-digitalocean" [GH-10961]
* "github.com/hashicorp/packer-plugin-docker" [GH-10695]
* "github.com/hashicorp/packer-plugin-googlecompute" [GH-10890]
* "github.com/hashicorp/packer-plugin-hcloud" [GH-10966]
* "github.com/hashicorp/packer-plugin-hyperone" [GH-10949]
* "github.com/hashicorp/packer-plugin-hyperv" [GH-10949]
* "github.com/hashicorp/packer-plugin-inspec"
* "github.com/hashicorp/packer-plugin-azure"
* "github.com/hashicorp/packer-plugin-googlecompute"
* "github.com/hashicorp/packer-plugin-parallels"
* "github.com/hashicorp/packer-plugin-qemu"
* "github.com/hashicorp/packer-plugin-vagrant"
* "github.com/hashicorp/packer-plugin-virtualbox"
* "github.com/hashicorp/packer-plugin-vmware"
* "github.com/hashicorp/packer-plugin-vsphere"
* "github.com/hashicorp/packer-plugin-alicloud"
* "github.com/hashicorp/packer-plugin-cloudstack"
* "github.com/hashicorp/packer-plugin-digitalocean"
* "github.com/hashicorp/packer-plugin-hcloud"
* "github.com/hashicorp/packer-plugin-hyperone"
* "github.com/hashicorp/packer-plugin-hyperv"
* "github.com/hashicorp/packer-plugin-ionos-cloud"
* "github.com/hashicorp/packer-plugin-jdcloud"
* "github.com/hashicorp/packer-plugin-linode"
* "github.com/hashicorp/packer-plugin-lxc"
* "github.com/hashicorp/packer-plugin-lxd"
* "github.com/hashicorp/packer-plugin-ncloud"
* "github.com/hashicorp/packer-plugin-openstack"
* "github.com/hashicorp/packer-plugin-oracle"
* "github.com/hashicorp/packer-plugin-outscale"
* "github.com/hashicorp/packer-plugin-proxmox"
* "github.com/hashicorp/packer-plugin-scaleway"
* "github.com/hashicorp/packer-plugin-tencentcloud"
* "github.com/hashicorp/packer-plugin-triton"
* "github.com/hashicorp/packer-plugin-ucloud"
* "github.com/hashicorp/packer-plugin-yandex"
* "github.com/hashicorp/packer-plugin-chef"
* "github.com/hashicorp/packer-plugin-converge"
* "github.com/hashicorp/packer-plugin-puppet"
* "github.com/hashicorp/packer-plugin-jdcloud" [GH-10946]
* "github.com/hashicorp/packer-plugin-linode" [GH-10947]
* "github.com/hashicorp/packer-plugin-lxc" [GH-10965]
* "github.com/hashicorp/packer-plugin-lxd" [GH-10965]
* "github.com/hashicorp/packer-plugin-ncloud" [GH-10937]
* "github.com/hashicorp/packer-plugin-openstack" [GH-10933]
* "github.com/hashicorp/packer-plugin-oracle" [GH-10962]
* "github.com/hashicorp/packer-plugin-outscale" [GH-10941]
* "github.com/hashicorp/packer-plugin-parallels" [GH-10936]
* "github.com/hashicorp/packer-plugin-proxmox" [GH-10930]
* "github.com/hashicorp/packer-plugin-puppet" [GH-10943]
* "github.com/hashicorp/packer-plugin-qemu" [GH-10929]
* "github.com/hashicorp/packer-plugin-salt"
* "github.com/hashicorp/packer-plugin-scaleway" [GH-10939]
* "github.com/hashicorp/packer-plugin-tencentcloud" [GH-10967]
* "github.com/hashicorp/packer-plugin-triton" [GH-10963]
* "github.com/hashicorp/packer-plugin-ucloud" [GH-10953]
* "github.com/hashicorp/packer-plugin-vagrant" [GH-10960]
* "github.com/hashicorp/packer-plugin-virtualbox" [GH-10910]
* "github.com/hashicorp/packer-plugin-vmware" [GH-10920]
* "github.com/hashicorp/packer-plugin-vsphere" [GH-10896]
* "github.com/hashicorp/packer-plugin-yandex" [GH-10970]

_this will not be a backwards-breaking change in v1.7.3_ because the extracted
components are being vendored back into Packer. However, we encourage users to

@@ -68,9 +69,31 @@ components will not be removed from the main packer binary:
* `shell-local` post-processor

### Bug Fixes:
* core/hcl: Fix Invalid provisioner pause_before panic [GH-10978]
* builder/azure: Add `keep_os_disk` parameter to control OS disk deletion [GH-10045]
* builder/azure: Add `keep_os_disk` parameter to control OS disk deletion
  [GH-10045]
* builder/azure: Stop SIG timeout from being overridden by PollingDuration
  [GH-10816]
* builder/azure: Support shared image gallery storage account type [GH-10863]
* builder/proxmox: Proxmox builder use ipv4 address instead of always ipv6.
  [GH-10858]
* core/hcl: Fix Invalid provisioner pause_before panic [GH-10978]
* core: HCL "index" function now actually returns the index of the element
  [GH-11008]
* core: Implemented DEFAULT_NAME handling for datasource plugins [GH-11026]

### Enhancements:

* builder/azure: Added custom nicname and osdiskname [GH-10938]
* builder/azure: Add support for shared image gallery storage account type
  [GH-10863]
* builder/digitalocean: support ecdsa, ed25519, dsa temporary key types.
  [GH-10856]
* builder/ncloud: Support ncloud vpc version [GH-10870]
* post-processor/compress: Add bzip2 support to post-processor [GH-10867]
* post-processor/googlecompute-import: Add Image Storage Locations field
  [GH-10864]
* Removed the golang "vendor" directory in favor of go modules. This should not
  affect end users. [GH-10916]

## 1.7.2 (April 05, 2021)
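For templates that use one of the extracted components directly, the plugin can also be declared through Packer's `required_plugins` block rather than relying on the copy vendored back into the core binary. A minimal sketch, assuming the Packer 1.7 HCL2 syntax; the version constraint is illustrative and each plugin's exact source address is documented in its own repository:

packer {
  required_plugins {
    docker = {
      # Illustrative constraint; pin to whatever release of
      # github.com/hashicorp/packer-plugin-docker you actually need.
      version = ">= 0.0.7"
      source  = "github.com/hashicorp/docker"
    }
  }
}

Running `packer init` against such a template then fetches the plugin from its own releases instead of using the built-in component. As a point of reference for the `index` fix noted above, an expression such as `index(["a", "b", "c"], "b")` is now expected to evaluate to `1` (the element's position) rather than to the element itself.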
@@ -1,41 +0,0 @@
package oneandone

import (
    "fmt"
)

type Artifact struct {
    snapshotId   string
    snapshotName string

    // StateData should store data such as GeneratedData
    // to be shared with post-processors
    StateData map[string]interface{}
}

func (*Artifact) BuilderId() string {
    return BuilderId
}

func (a *Artifact) Files() []string {
    return []string{}
}

func (*Artifact) Id() string {
    return "Null"
}

func (a *Artifact) String() string {
    if a.snapshotId == "" {
        return "No image has been created."
    }
    return fmt.Sprintf("A snapshot was created: '%v', '%v'", a.snapshotId, a.snapshotName)
}

func (a *Artifact) State(name string) interface{} {
    return a.StateData[name]
}

func (a *Artifact) Destroy() error {
    return nil
}
@@ -1,82 +0,0 @@
package oneandone

import (
    "context"
    "errors"
    "fmt"

    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer-plugin-sdk/communicator"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

const BuilderId = "packer.oneandone"

type Builder struct {
    config Config
    runner multistep.Runner
}

func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }

func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
    warnings, errs := b.config.Prepare(raws...)
    if errs != nil {
        return nil, warnings, errs
    }

    return nil, warnings, nil
}

func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {

    state := new(multistep.BasicStateBag)

    state.Put("config", &b.config)
    state.Put("hook", hook)
    state.Put("ui", ui)

    steps := []multistep.Step{
        &StepCreateSSHKey{
            Debug:        b.config.PackerDebug,
            DebugKeyPath: fmt.Sprintf("oneandone_%s", b.config.SnapshotName),
        },
        new(stepCreateServer),
        &communicator.StepConnect{
            Config:    &b.config.Comm,
            Host:      communicator.CommHost(b.config.Comm.Host(), "server_ip"),
            SSHConfig: b.config.Comm.SSHConfigFunc(),
        },
        &commonsteps.StepProvision{},
        &commonsteps.StepCleanupTempKeys{
            Comm: &b.config.Comm,
        },
        new(stepTakeSnapshot),
    }

    b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
    b.runner.Run(ctx, state)

    if rawErr, ok := state.GetOk("error"); ok {
        return nil, rawErr.(error)
    }

    if temp, ok := state.GetOk("snapshot_name"); ok {
        b.config.SnapshotName = temp.(string)
    }

    artifact := &Artifact{
        snapshotName: b.config.SnapshotName,
        StateData:    map[string]interface{}{"generated_data": state.Get("generated_data")},
    }

    if id, ok := state.GetOk("snapshot_id"); ok {
        artifact.snapshotId = id.(string)
    } else {
        return nil, errors.New("Image creation has failed.")
    }

    return artifact, nil
}
@@ -1,33 +0,0 @@
package oneandone

import (
    "os"
    "testing"

    builderT "github.com/hashicorp/packer/acctest"
)

func TestBuilderAcc_basic(t *testing.T) {
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Builder:  &Builder{},
        Template: testBuilderAccBasic,
    })
}

func testAccPreCheck(t *testing.T) {
    if v := os.Getenv("ONEANDONE_TOKEN"); v == "" {
        t.Fatal("ONEANDONE_TOKEN must be set for acceptance tests")
    }
}

const testBuilderAccBasic = `
{
    "builders": [{
        "type": "oneandone",
        "disk_size": "50",
        "snapshot_name": "test5",
        "image" : "ubuntu1604-64min"
    }]
}
`
@@ -1,56 +0,0 @@
package oneandone

import (
    "fmt"
    "testing"

    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func testConfig() map[string]interface{} {
    return map[string]interface{}{
        "type":          "oneandone",
        "disk_size":     "50",
        "snapshot_name": "test5",
        "image":         "ubuntu1604-64min",
    }
}

func TestImplementsBuilder(t *testing.T) {
    var raw interface{}
    raw = &Builder{}
    if _, ok := raw.(packersdk.Builder); !ok {
        t.Fatalf("Builder should be a builder")
    }
}

func TestBuilder_Prepare_BadType(t *testing.T) {
    b := &Builder{}
    c := map[string]interface{}{
        "api_key": []string{},
    }

    _, warns, err := b.Prepare(c)
    if len(warns) > 0 {
        t.Fatalf("bad: %#v", warns)
    }
    if err == nil {
        fmt.Println(err)
        fmt.Println(warns)
        t.Fatalf("prepare should fail")
    }
}

func TestBuilderPrepare_InvalidKey(t *testing.T) {
    var b Builder
    config := testConfig()

    config["i_should_not_be_valid"] = true
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatal("should have error")
    }
}
@@ -1,113 +0,0 @@
//go:generate packer-sdc mapstructure-to-hcl2 -type Config

package oneandone

import (
    "errors"
    "os"
    "strings"

    "github.com/1and1/oneandone-cloudserver-sdk-go"
    "github.com/hashicorp/packer-plugin-sdk/common"
    "github.com/hashicorp/packer-plugin-sdk/communicator"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer-plugin-sdk/template/interpolate"
    "github.com/mitchellh/mapstructure"
)

type Config struct {
    common.PackerConfig `mapstructure:",squash"`
    Comm                communicator.Config `mapstructure:",squash"`

    Token          string `mapstructure:"token"`
    Url            string `mapstructure:"url"`
    SnapshotName   string `mapstructure:"image_name"`
    DataCenterName string `mapstructure:"data_center_name"`
    DataCenterId   string
    Image          string `mapstructure:"source_image_name"`
    DiskSize       int    `mapstructure:"disk_size"`
    Retries        int    `mapstructure:"retries"`
    ctx            interpolate.Context
}

func (c *Config) Prepare(raws ...interface{}) ([]string, error) {

    var md mapstructure.Metadata
    err := config.Decode(c, &config.DecodeOpts{
        Metadata:           &md,
        Interpolate:        true,
        InterpolateContext: &c.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{
                "run_command",
            },
        },
    }, raws...)
    if err != nil {
        return nil, err
    }

    var errs *packersdk.MultiError

    if c.SnapshotName == "" {
        def, err := interpolate.Render("packer-{{timestamp}}", nil)
        if err != nil {
            panic(err)
        }

        // Default to packer-{{ unix timestamp (utc) }}
        c.SnapshotName = def
    }

    if c.Image == "" {
        errs = packersdk.MultiErrorAppend(
            errs, errors.New("1&1 'image' is required"))
    }

    if c.Token == "" {
        c.Token = os.Getenv("ONEANDONE_TOKEN")
    }

    if c.Url == "" {
        c.Url = oneandone.BaseUrl
    }

    if c.DiskSize == 0 {
        c.DiskSize = 50
    }

    if c.Retries == 0 {
        c.Retries = 600
    }

    if c.DataCenterName != "" {
        token := oneandone.SetToken(c.Token)

        //Create an API client
        api := oneandone.New(token, c.Url)

        dcs, err := api.ListDatacenters()

        if err != nil {
            errs = packersdk.MultiErrorAppend(
                errs, err)
        }
        for _, dc := range dcs {
            if strings.EqualFold(dc.CountryCode, c.DataCenterName) {
                c.DataCenterId = dc.Id
                break
            }
        }
    }

    if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
        errs = packersdk.MultiErrorAppend(errs, es...)
    }

    if errs != nil && len(errs.Errors) > 0 {
        return nil, errs
    }
    packersdk.LogSecretFilter.Set(c.Token)
    return nil, nil
}
@@ -1,159 +0,0 @@
// Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT.

package oneandone

import (
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/zclconf/go-cty/cty"
)

// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatConfig struct {
    PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
    PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
    PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
    PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
    PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
    PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
    PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
    PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
    Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
    PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
    SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
    SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
    SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
    SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
    SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
    SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
    SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
    SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
    SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
    SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
    SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
    SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
    SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
    SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
    SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
    SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
    SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
    SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
    SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
    SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
    SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
    SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
    SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
    SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
    SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
    SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
    SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
    SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
    SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
    SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
    SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
    SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
    SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
    SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
    SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
    SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
    SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
    SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
    WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
    WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
    WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
    WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
    WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
    WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
    WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
    WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
    WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
    Token *string `mapstructure:"token" cty:"token" hcl:"token"`
    Url *string `mapstructure:"url" cty:"url" hcl:"url"`
    SnapshotName *string `mapstructure:"image_name" cty:"image_name" hcl:"image_name"`
    DataCenterName *string `mapstructure:"data_center_name" cty:"data_center_name" hcl:"data_center_name"`
    DataCenterId *string `cty:"data_center_id" hcl:"data_center_id"`
    Image *string `mapstructure:"source_image_name" cty:"source_image_name" hcl:"source_image_name"`
    DiskSize *int `mapstructure:"disk_size" cty:"disk_size" hcl:"disk_size"`
    Retries *int `mapstructure:"retries" cty:"retries" hcl:"retries"`
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
    return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
    s := map[string]hcldec.Spec{
        "packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
        "packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
        "packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
        "packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
        "packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
        "packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
        "packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
        "packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
        "communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
        "pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
        "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
        "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
        "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
        "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
        "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
        "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
        "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
        "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
        "ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
        "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
        "ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
        "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
        "ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
        "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
        "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
        "ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
        "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
        "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
        "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
        "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
        "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
        "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
        "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
        "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
        "ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
        "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
        "ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
        "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
        "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
        "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
        "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
        "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
        "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
        "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
        "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
        "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
        "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
        "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
        "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
        "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
        "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
        "winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
        "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
        "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
        "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
        "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
        "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
        "token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
        "url": &hcldec.AttrSpec{Name: "url", Type: cty.String, Required: false},
        "image_name": &hcldec.AttrSpec{Name: "image_name", Type: cty.String, Required: false},
        "data_center_name": &hcldec.AttrSpec{Name: "data_center_name", Type: cty.String, Required: false},
        "data_center_id": &hcldec.AttrSpec{Name: "data_center_id", Type: cty.String, Required: false},
        "source_image_name": &hcldec.AttrSpec{Name: "source_image_name", Type: cty.String, Required: false},
        "disk_size": &hcldec.AttrSpec{Name: "disk_size", Type: cty.Number, Required: false},
        "retries": &hcldec.AttrSpec{Name: "retries", Type: cty.Number, Required: false},
    }
    return s
}
@@ -1,142 +0,0 @@
package oneandone

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/1and1/oneandone-cloudserver-sdk-go"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

type stepCreateServer struct{}

func (s *stepCreateServer) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    c := state.Get("config").(*Config)

    token := oneandone.SetToken(c.Token)

    //Create an API client
    api := oneandone.New(token, c.Url)

    // List server appliances
    saps, _ := api.ListServerAppliances()

    time.Sleep(time.Second * 10)

    var sa oneandone.ServerAppliance
    for _, a := range saps {

        if a.Type == "IMAGE" && strings.Contains(strings.ToLower(a.Name), strings.ToLower(c.Image)) {
            sa = a
            break
        }
    }

    if c.DiskSize < sa.MinHddSize {
        ui.Error(fmt.Sprintf("Minimum required disk size %d", sa.MinHddSize))
    }

    ui.Say("Creating Server...")

    // Create a server
    req := oneandone.ServerRequest{
        Name:        c.SnapshotName,
        Description: "Example server description.",
        ApplianceId: sa.Id,
        PowerOn:     true,
        Hardware: oneandone.Hardware{
            Vcores:            1,
            CoresPerProcessor: 1,
            Ram:               2,
            Hdds: []oneandone.Hdd{
                {
                    Size:   c.DiskSize,
                    IsMain: true,
                },
            },
        },
    }

    if c.DataCenterId != "" {
        req.DatacenterId = c.DataCenterId
    }

    if c.Comm.SSHPassword != "" {
        req.Password = c.Comm.SSHPassword
    }
    if len(c.Comm.SSHPublicKey) != 0 {
        req.SSHKey = string(c.Comm.SSHPublicKey)
    }

    server_id, server, err := api.CreateServer(&req)
    if err != nil {
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    // Wait until server is created and powered on for at most 60 x 10 seconds
    err = api.WaitForState(server, "POWERED_ON", 10, c.Retries)
    if err != nil {
        ui.Error(fmt.Sprintf("Timeout waiting for server: %s", server_id))
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    // Get a server
    server, err = api.GetServer(server_id)
    if err != nil {
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    state.Put("server_id", server_id)
    // instance_id is the generic term used so that users can have access to the
    // instance id inside of the provisioners, used in step_provision.
    state.Put("instance_id", server_id)

    state.Put("server_ip", server.Ips[0].Ip)

    return multistep.ActionContinue
}

func (s *stepCreateServer) Cleanup(state multistep.StateBag) {
    c := state.Get("config").(*Config)
    ui := state.Get("ui").(packersdk.Ui)

    ui.Say("Removing Server...")

    token := oneandone.SetToken(c.Token)
    //Create an API client
    api := oneandone.New(token, oneandone.BaseUrl)

    var serverId string
    if temp, ok := state.GetOk("server_id"); ok {
        serverId = temp.(string)
    }

    if serverId != "" {
        server, err := api.ShutdownServer(serverId, false)
        if err != nil {
            ui.Error(fmt.Sprintf("Error shutting down 1and1 server. Please destroy it manually: %s", serverId))
            ui.Error(err.Error())
        }
        err = api.WaitForState(server, "POWERED_OFF", 10, c.Retries)
        if err != nil {
            ui.Error(fmt.Sprintf(
                "Error waiting for 1and1 POWERED_OFF state. Please destroy it manually: %s",
                serverId))
            ui.Error(err.Error())
        }

        _, err = api.DeleteServer(server.Id, false)

        if err != nil {
            ui.Error(fmt.Sprintf("Error deleting 1and1 server. Please destroy it manually: %s", serverId))
            ui.Error(err.Error())
        }
    }
}
@@ -1,62 +0,0 @@
package oneandone

import (
    "context"
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "io/ioutil"

    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "golang.org/x/crypto/ssh"
)

type StepCreateSSHKey struct {
    Debug        bool
    DebugKeyPath string
}

func (s *StepCreateSSHKey) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    c := state.Get("config").(*Config)

    if c.Comm.SSHPrivateKeyFile != "" {
        pemBytes, err := ioutil.ReadFile(c.Comm.SSHPrivateKeyFile)

        if err != nil {
            ui.Error(err.Error())
            return multistep.ActionHalt
        }

        block, _ := pem.Decode(pemBytes)

        priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)

        if err != nil {

            state.Put("error", err.Error())
            ui.Error(err.Error())
            return multistep.ActionHalt
        }

        priv_blk := pem.Block{
            Type:    "RSA PRIVATE KEY",
            Headers: nil,
            Bytes:   x509.MarshalPKCS1PrivateKey(priv),
        }

        pub, err := ssh.NewPublicKey(&priv.PublicKey)
        if err != nil {
            err := fmt.Errorf("Error creating temporary ssh key: %s", err)
            state.Put("error", err)
            ui.Error(err.Error())
            return multistep.ActionHalt
        }
        c.Comm.SSHPrivateKey = pem.EncodeToMemory(&priv_blk)
        c.Comm.SSHPublicKey = ssh.MarshalAuthorizedKey(pub)
    }
    return multistep.ActionContinue
}

func (s *StepCreateSSHKey) Cleanup(state multistep.StateBag) {}
@@ -1,52 +0,0 @@
package oneandone

import (
    "context"

    "github.com/1and1/oneandone-cloudserver-sdk-go"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

type stepTakeSnapshot struct{}

func (s *stepTakeSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    c := state.Get("config").(*Config)

    ui.Say("Creating Snapshot...")

    token := oneandone.SetToken(c.Token)
    api := oneandone.New(token, c.Url)

    serverId := state.Get("server_id").(string)

    req := oneandone.ImageConfig{
        Name:        c.SnapshotName,
        Description: "Packer image",
        ServerId:    serverId,
        Frequency:   "WEEKLY",
        NumImages:   1,
    }

    img_id, img, err := api.CreateImage(&req)

    if err != nil {
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    err = api.WaitForState(img, "ENABLED", 10, c.Retries)

    if err != nil {
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    state.Put("snapshot_id", img_id)
    state.Put("snapshot_name", img.Name)
    return multistep.ActionContinue
}

func (s *stepTakeSnapshot) Cleanup(state multistep.StateBag) {
}
@@ -1,13 +0,0 @@
package version

import (
    "github.com/hashicorp/packer-plugin-sdk/version"
    packerVersion "github.com/hashicorp/packer/version"
)

var OneAndOnePluginVersion *version.PluginVersion

func init() {
    OneAndOnePluginVersion = version.InitializePluginVersion(
        packerVersion.Version, packerVersion.VersionPrerelease)
}
@@ -156,6 +156,7 @@ func Test_fmt_pipe(t *testing.T) {
        expected string
    }{
        {unformattedHCL, []string{"fmt", "-"}, nil, formattedHCL},
        {formattedHCL, []string{"fmt", "-"}, nil, formattedHCL},
    }

    for _, tc := range tc {
@@ -153,7 +153,7 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)

    hdl, ret := c.GetConfigFromJSON(&cla.MetaArgs)
    if ret != 0 {
        return ret
        c.Ui.Error(fmt.Sprintf("Failed to get config from JSON"))
    }

    core := hdl.(*CoreWrapper).Core

@@ -168,16 +168,16 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        WithAnnotations: cla.WithAnnotations,
    }
    if err := packerBlock.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following Parse error: %v", err))
        ret = 1
    }

    variables := &VariableParser{
        WithAnnotations: cla.WithAnnotations,
    }
    if err := variables.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following variables.Parse error: %v", err))
        ret = 1
    }

    locals := &LocalsParser{

@@ -185,8 +185,8 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        WithAnnotations: cla.WithAnnotations,
    }
    if err := locals.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following locals.Parse error: %v", err))
        ret = 1
    }

    builders := []*template.Builder{}

@@ -205,8 +205,8 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        WithAnnotations: cla.WithAnnotations,
    }
    if err := amazonAmiDatasource.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following amazonAmiDatasource.Parse error: %v", err))
        ret = 1
    }

    sources := &SourceParser{

@@ -215,8 +215,8 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        WithAnnotations: cla.WithAnnotations,
    }
    if err := sources.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following sources.Parse error: %v", err))
        ret = 1
    }

    build := &BuildParser{

@@ -224,16 +224,16 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        WithAnnotations: cla.WithAnnotations,
    }
    if err := build.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following build.Parse error: %v", err))
        ret = 1
    }

    amazonSecretsDatasource := &AmazonSecretsDatasourceParser{
        WithAnnotations: cla.WithAnnotations,
    }
    if err := amazonSecretsDatasource.Parse(tpl); err != nil {
        c.Ui.Error(err.Error())
        return 1
        c.Ui.Error(fmt.Sprintf("Ignoring following amazonSecretsDatasource.Parse error: %v", err))
        ret = 1
    }

    // Write file

@@ -255,8 +255,8 @@ func (c *HCL2UpgradeCommand) RunContext(_ context.Context, cla *HCL2UpgradeArgs)
        return 1
    }

    c.Ui.Say(fmt.Sprintf("Successfully created %s ", cla.OutputFile))
    return 0
    c.Ui.Say(fmt.Sprintf("Successfully created %s. Exit %d", cla.OutputFile, ret))
    return ret
}

type UnhandleableArgumentError struct {

@@ -971,6 +971,7 @@ type SourceParser struct {
}

func (p *SourceParser) Parse(tpl *template.Template) error {
    var unknownBuilders []string
    if p.out == nil {
        p.out = []byte{}
    }

@@ -980,7 +981,8 @@ func (p *SourceParser) Parse(tpl *template.Template) error {

        body.AppendNewline()
        if !p.BuilderPlugins.Has(builderCfg.Type) {
            return fmt.Errorf("unknown builder type: %q\n", builderCfg.Type)
            unknownBuilders = append(unknownBuilders, builderCfg.Type)

        }
        if builderCfg.Name == "" || builderCfg.Name == builderCfg.Type {
            builderCfg.Name = fmt.Sprintf("autogenerated_%d", i+1)

@@ -993,6 +995,9 @@ func (p *SourceParser) Parse(tpl *template.Template) error {

        p.out = append(p.out, transposeTemplatingCalls(sourcesContent.Bytes())...)
    }
    if len(unknownBuilders) > 0 {
        return fmt.Errorf("unknown builder type(s): %v\n", unknownBuilders)
    }
    return nil
}
@@ -17,9 +17,11 @@ func Test_hcl2_upgrade(t *testing.T) {
    _ = cwd

    tc := []struct {
        folder string
        flags  []string
        folder   string
        flags    []string
        exitCode int
    }{
        {folder: "unknown_builder", flags: []string{}, exitCode: 1},
        {folder: "complete", flags: []string{"-with-annotations"}},
        {folder: "without-annotations", flags: []string{}},
        {folder: "minimal", flags: []string{"-with-annotations"}},

@@ -43,9 +45,9 @@ func Test_hcl2_upgrade(t *testing.T) {
        }
        args = append(args, inputPath)
        p := helperCommand(t, args...)
        bs, err := p.CombinedOutput()
        err := p.Run()
        if err != nil {
            t.Fatalf("%v %s", err, bs)
            t.Logf("run returned an error: %s", err)
        }
        expected := string(mustBytes(ioutil.ReadFile(expectedPath)))
        actual := string(mustBytes(ioutil.ReadFile(outputPath)))

@@ -53,6 +55,10 @@ func Test_hcl2_upgrade(t *testing.T) {
        if diff := cmp.Diff(expected, actual); diff != "" {
            t.Fatalf("unexpected output: %s", diff)
        }
        actualExitCode := p.ProcessState.ExitCode()
        if tc.exitCode != actualExitCode {
            t.Fatalf("unexpected exit code: %d found; expected %d ", actualExitCode, tc.exitCode)
        }
        os.Remove(outputPath)
    })
}
@@ -15,7 +15,6 @@ import (

    filebuilder "github.com/hashicorp/packer/builder/file"
    nullbuilder "github.com/hashicorp/packer/builder/null"
    oneandonebuilder "github.com/hashicorp/packer/builder/oneandone"
    profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks"
    artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice"
    checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum"

@@ -41,7 +40,6 @@ type PluginCommand struct {
var Builders = map[string]packersdk.Builder{
    "file":         new(filebuilder.Builder),
    "null":         new(nullbuilder.Builder),
    "oneandone":    new(oneandonebuilder.Builder),
    "profitbricks": new(profitbricksbuilder.Builder),
}
@@ -0,0 +1,50 @@
packer {
  required_version = ">= 1.6.0"
}

variable "aws_access_key" {
  type    = string
  default = ""
}

variable "aws_region" {
  type = string
}

variable "aws_secret_key" {
  type    = string
  default = ""
}

source "potatoes" "autogenerated_1" {
  access_key      = "${var.aws_access_key}"
  ami_description = "Ubuntu 16.04 LTS - expand root partition"
  ami_name        = "ubuntu-16-04-test"
  encrypt_boot    = true
  launch_block_device_mappings {
    delete_on_termination = true
    device_name           = "/dev/sda1"
    volume_size           = 48
    volume_type           = "gp2"
  }
  region              = "${var.aws_region}"
  secret_key          = "${var.aws_secret_key}"
  source_ami          = "ami1234567"
  spot_instance_types = ["t2.small", "t2.medium", "t2.large"]
  spot_price          = "0.0075"
  ssh_interface       = "session_manager"
  ssh_username        = "ubuntu"
  temporary_iam_instance_profile_policy_document {
    Statement {
      Action   = ["*"]
      Effect   = "Allow"
      Resource = ["*"]
    }
    Version = "2012-10-17"
  }
}

build {
  sources = ["source.potatoes.autogenerated_1"]

}
@@ -0,0 +1,48 @@
{
  "min_packer_version": "1.6.0",
  "variables": {
    "aws_region": null,
    "aws_secret_key": "",
    "aws_access_key": ""
  },
  "builders": [
    {
      "type": "potatoes",
      "region": "{{ user `aws_region` }}",
      "secret_key": "{{ user `aws_secret_key` }}",
      "access_key": "{{ user `aws_access_key` }}",
      "ami_name": "ubuntu-16-04-test",
      "ami_description": "Ubuntu 16.04 LTS - expand root partition",
      "source_ami": "ami1234567",
      "launch_block_device_mappings": [
        {
          "delete_on_termination": true,
          "device_name": "/dev/sda1",
          "volume_type": "gp2",
          "volume_size": 48
        }
      ],
      "spot_price": "0.0075",
      "spot_instance_types": [
        "t2.small",
        "t2.medium",
        "t2.large"
      ],
      "encrypt_boot": true,
      "ssh_username": "ubuntu",
      "temporary_iam_instance_profile_policy_document": {
        "Version": "2012-10-17",
        "Statement": [
          {
            "Effect": "Allow",
            "Action": [
              "*"
            ],
            "Resource": ["*"]
          }
        ]
      },
      "ssh_interface": "session_manager"
    }
  ]
}
@@ -46,6 +46,7 @@ import (
    lxcbuilder "github.com/hashicorp/packer-plugin-lxc/builder/lxc"
    lxdbuilder "github.com/hashicorp/packer-plugin-lxd/builder/lxd"
    ncloudbuilder "github.com/hashicorp/packer-plugin-ncloud/builder/ncloud"
    oneandonebuilder "github.com/hashicorp/packer-plugin-oneandone/builder/oneandone"
    openstackbuilder "github.com/hashicorp/packer-plugin-openstack/builder/openstack"
    oracleclassicbuilder "github.com/hashicorp/packer-plugin-oracle/builder/classic"
    oracleocibuilder "github.com/hashicorp/packer-plugin-oracle/builder/oci"

@@ -114,6 +115,7 @@ var VendoredBuilders = map[string]packersdk.Builder{
    "lxc":            new(lxcbuilder.Builder),
    "lxd":            new(lxdbuilder.Builder),
    "ncloud":         new(ncloudbuilder.Builder),
    "oneandone":      new(oneandonebuilder.Builder),
    "openstack":      new(openstackbuilder.Builder),
    "oracle-classic": new(oracleclassicbuilder.Builder),
    "oracle-oci":     new(oracleocibuilder.Builder),
go.mod (8 changed lines)

@@ -1,7 +1,6 @@
module github.com/hashicorp/packer

require (
    github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1
    github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3
    github.com/cheggaaa/pb v1.0.27
    github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e

@@ -21,7 +20,7 @@ require (
    github.com/hashicorp/packer-plugin-alicloud v0.0.2
    github.com/hashicorp/packer-plugin-amazon v0.0.1
    github.com/hashicorp/packer-plugin-ansible v0.0.3
    github.com/hashicorp/packer-plugin-azure v0.0.2
    github.com/hashicorp/packer-plugin-azure v0.0.3
    github.com/hashicorp/packer-plugin-chef v0.0.2
    github.com/hashicorp/packer-plugin-cloudstack v0.0.1
    github.com/hashicorp/packer-plugin-converge v0.0.1

@@ -36,6 +35,7 @@ require (
    github.com/hashicorp/packer-plugin-lxc v0.0.1
    github.com/hashicorp/packer-plugin-lxd v0.0.1
    github.com/hashicorp/packer-plugin-ncloud v0.0.2
    github.com/hashicorp/packer-plugin-oneandone v0.0.1
    github.com/hashicorp/packer-plugin-openstack v0.0.2
    github.com/hashicorp/packer-plugin-oracle v0.0.3
    github.com/hashicorp/packer-plugin-outscale v0.0.1

@@ -44,7 +44,7 @@ require (
    github.com/hashicorp/packer-plugin-puppet v0.0.2
    github.com/hashicorp/packer-plugin-qemu v0.0.1
    github.com/hashicorp/packer-plugin-scaleway v0.0.1
    github.com/hashicorp/packer-plugin-sdk v0.2.1
    github.com/hashicorp/packer-plugin-sdk v0.2.2
    github.com/hashicorp/packer-plugin-tencentcloud v0.0.1
    github.com/hashicorp/packer-plugin-triton v0.0.0-20210421085122-768dd7c764d9
    github.com/hashicorp/packer-plugin-ucloud v0.0.1

@@ -54,7 +54,7 @@ require (
    github.com/hashicorp/packer-plugin-vsphere v0.0.1
    github.com/hashicorp/packer-plugin-yandex v0.0.4
    github.com/klauspost/pgzip v0.0.0-20151221113845-47f36e165cec
    github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380
    github.com/masterzen/winrm v0.0.0-20210504160029-28ed956f5227
    github.com/mattn/go-tty v0.0.0-20191112051231-74040eebce08
    github.com/mitchellh/cli v1.1.0
    github.com/mitchellh/go-homedir v1.1.0
go.sum (13 changed lines)

@@ -547,8 +547,8 @@ github.com/hashicorp/packer-plugin-amazon v0.0.1 h1:EuyjNK9bL7WhQeIJzhBJxOx8nyc6
github.com/hashicorp/packer-plugin-amazon v0.0.1/go.mod h1:12c9msibyHdId+Mk/pCbdRb1KaLIhaNyxeJ6n8bZt30=
github.com/hashicorp/packer-plugin-ansible v0.0.3 h1:pLL2ZqRt4LVBwhtcG/PVgr9WbhfYfIDJ2aWT+Q7ef9U=
github.com/hashicorp/packer-plugin-ansible v0.0.3/go.mod h1:5/wOgs7TBwziYCznulfv5AwncLHavXQr83EtpkBVlXg=
github.com/hashicorp/packer-plugin-azure v0.0.2 h1:wNEWpkIUzFr/K0ddlipn7W7oJ/m8+RiWZ1xJMsX+hbM=
github.com/hashicorp/packer-plugin-azure v0.0.2/go.mod h1:ySskXX3DJV9Z9Yzt3dyrWsN1XUcjeIOtyL7/ZNHs6zw=
github.com/hashicorp/packer-plugin-azure v0.0.3 h1:kqHWW5bVXyYq6E9BqcdWTs1XL1ZkWauh3CKSh+JZ8T8=
github.com/hashicorp/packer-plugin-azure v0.0.3/go.mod h1:j+tJcKI1nF2I+06c3f6KUdchlgGP2Kc9tgWZ+Cr7uCo=
github.com/hashicorp/packer-plugin-chef v0.0.2 h1:JiciRcYGHaHB0LoJ0Y4oSJXrZeH0xbnshcEYGqC3lgI=
github.com/hashicorp/packer-plugin-chef v0.0.2/go.mod h1:PxGw+J6PTW74b8MzMDEIoVYHAIr+vCS1n0etz8pqdiM=
github.com/hashicorp/packer-plugin-cloudstack v0.0.1 h1:BF9nXRlA0xQV5W/+CoLjWn0aLO60gTbsxnLi/o37ktc=

@@ -577,6 +577,8 @@ github.com/hashicorp/packer-plugin-lxd v0.0.1 h1:CrFbQmQmdgI3n1RHMPmTUDinRPnPa/b
github.com/hashicorp/packer-plugin-lxd v0.0.1/go.mod h1:h3wqgxQiWy8pIJytTjeqXlAd1PXfNTvhTdODnvzDG3w=
github.com/hashicorp/packer-plugin-ncloud v0.0.2 h1:MGvGkOVfzeosqOSs5dteghLwv9VRcRxTuLoLX1ssUag=
github.com/hashicorp/packer-plugin-ncloud v0.0.2/go.mod h1:Hud2R1pkky96TQy3TPTTrr9Kej4b/4dqC/v+uEE0VDY=
github.com/hashicorp/packer-plugin-oneandone v0.0.1 h1:ypaXL9gpQEIE7zAiBU0GrbMxyRhx1izc1uYBewyxYFM=
github.com/hashicorp/packer-plugin-oneandone v0.0.1/go.mod h1:7zGckJD65NY3KNfnTDQsky+2USjplzVwtaLQOUgM9es=
github.com/hashicorp/packer-plugin-openstack v0.0.2 h1:wGNE8es3Bn9auuIoX+gqT9chXzYY9GlM55eSpM4uwtU=
github.com/hashicorp/packer-plugin-openstack v0.0.2/go.mod h1:rHAdd4+JmI+1z98Zx+lVOehgzLZT1Rjo2YgtS0NNvwM=
github.com/hashicorp/packer-plugin-oracle v0.0.3 h1:yQEAfCD+TQqEWjrHLJTfdJis7axhwknzCKB07gnTZDA=

@@ -608,8 +610,8 @@ github.com/hashicorp/packer-plugin-sdk v0.1.3-0.20210407232143-c217d82aefb6/go.m
github.com/hashicorp/packer-plugin-sdk v0.1.3/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
github.com/hashicorp/packer-plugin-sdk v0.1.4/go.mod h1:xePpgQgQYv/bamiypx3hH9ukidxDdcN8q0R0wLi8IEQ=
github.com/hashicorp/packer-plugin-sdk v0.2.0/go.mod h1:0DiOMEBldmB0HEhp0npFSSygC8bIvW43pphEgWkp2WU=
github.com/hashicorp/packer-plugin-sdk v0.2.1 h1:NZJ9h2ddzZb6E3eaYFD7L4mSjqFia3FDoDTxDGQKNMs=
github.com/hashicorp/packer-plugin-sdk v0.2.1/go.mod h1:4V7lS35FRhukvZrW41IPctTPY7JmHPOkFZcR7XGXZPk=
github.com/hashicorp/packer-plugin-sdk v0.2.2 h1:z0y0mIk4LoGHleheFNuAjw1/mOoaUPdXSTErICgOBYk=
github.com/hashicorp/packer-plugin-sdk v0.2.2/go.mod h1:MAOhxLneNh27t6N6SMyRcIR5qSE86e6yYCcEfRScwIE=
github.com/hashicorp/packer-plugin-tencentcloud v0.0.1 h1:DR7GETCzrK/DPFMUPbULIklCxwGhstbbz6pl+2S+UnM=
github.com/hashicorp/packer-plugin-tencentcloud v0.0.1/go.mod h1:FmdacMLvDKiT6OdMAc2x4LXtqu/soLApH3jF57SWOik=
github.com/hashicorp/packer-plugin-triton v0.0.0-20210421085122-768dd7c764d9 h1:No5oPI9Wa7FhTKkFJwI3hcfUVvEpgPC8QMcG9l/Vxzo=

@@ -725,8 +727,9 @@ github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEb
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY=
github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380 h1:uKhPH5dYpx3Z8ZAnaTGfGZUiHOWa5p5mdG8wZlh+tLo=
github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY=
github.com/masterzen/winrm v0.0.0-20210504160029-28ed956f5227 h1:Vcl9dr3dZMIEGpwP1+QSkqFUVQVOopV1nP+I3a6r6tw=
github.com/masterzen/winrm v0.0.0-20210504160029-28ed956f5227/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
|
@ -139,6 +139,10 @@ func (f *HCL2Formatter) processFile(filename string) ([]byte, error) {
|
||||
outSrc := hclwrite.Format(inSrc)
|
||||
|
||||
if bytes.Equal(inSrc, outSrc) {
|
||||
if filename == "-" {
|
||||
_, _ = f.Output.Write(outSrc)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
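For context, a minimal standalone sketch (not Packer's actual implementation) of the pattern this hunk introduces: run `hclwrite.Format` and skip the write when the output is byte-identical to the input. The `example.pkr.hcl` filename is made up.

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/hashicorp/hcl/v2/hclwrite"
)

// formatFile rewrites path only when hclwrite.Format actually changes it.
func formatFile(path string) (changed bool, err error) {
	inSrc, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	outSrc := hclwrite.Format(inSrc)
	if bytes.Equal(inSrc, outSrc) {
		// Already formatted: nothing to write back.
		return false, nil
	}
	return true, os.WriteFile(path, outSrc, 0o644)
}

func main() {
	changed, err := formatFile("example.pkr.hcl") // hypothetical file name
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("changed:", changed)
}
```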
|
||||
|
||||
|
@ -2,7 +2,6 @@ package hcl2template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@ -227,7 +226,6 @@ func (c *PackerConfig) evaluateLocalVariables(locals []*LocalBlock) hcl.Diagnost
|
||||
var retry, previousL int
|
||||
for len(locals) > 0 {
|
||||
local := locals[0]
|
||||
log.Printf("[DEBUG-10981] Eavluating local: %q", local.Name)
|
||||
moreDiags := c.evaluateLocalVariable(local)
|
||||
if moreDiags.HasErrors() {
|
||||
if len(locals) == 1 {
|
||||
@ -424,6 +422,7 @@ func (cfg *PackerConfig) getCoreBuildPostProcessors(source SourceUseBlock, block
|
||||
func (cfg *PackerConfig) GetBuilds(opts packer.GetBuildsOptions) ([]packersdk.Build, hcl.Diagnostics) {
|
||||
res := []packersdk.Build{}
|
||||
var diags hcl.Diagnostics
|
||||
possibleBuildNames := []string{}
|
||||
|
||||
cfg.debug = opts.Debug
|
||||
cfg.force = opts.Force
|
||||
@ -449,6 +448,7 @@ func (cfg *PackerConfig) GetBuilds(opts packer.GetBuildsOptions) ([]packersdk.Bu
|
||||
|
||||
// Apply the -only and -except command-line options to exclude matching builds.
|
||||
buildName := pcb.Name()
|
||||
possibleBuildNames = append(possibleBuildNames, buildName)
|
||||
// -only
|
||||
if len(opts.Only) > 0 {
|
||||
onlyGlobs, diags := convertFilterOption(opts.Only, "only")
|
||||
@ -466,6 +466,7 @@ func (cfg *PackerConfig) GetBuilds(opts packer.GetBuildsOptions) ([]packersdk.Bu
|
||||
if !include {
|
||||
continue
|
||||
}
|
||||
opts.OnlyMatches++
|
||||
}
|
||||
|
||||
// -except
|
||||
@ -483,6 +484,7 @@ func (cfg *PackerConfig) GetBuilds(opts packer.GetBuildsOptions) ([]packersdk.Bu
|
||||
}
|
||||
}
|
||||
if exclude {
|
||||
opts.ExceptMatches++
|
||||
continue
|
||||
}
|
||||
}
|
||||
@ -552,6 +554,22 @@ func (cfg *PackerConfig) GetBuilds(opts packer.GetBuildsOptions) ([]packersdk.Bu
|
||||
res = append(res, pcb)
|
||||
}
|
||||
}
|
||||
if len(opts.Only) > opts.OnlyMatches {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagWarning,
|
||||
Summary: "an 'only' option was passed, but not all matches were found for the given build.",
|
||||
Detail: fmt.Sprintf("Possible build names: %v.\n"+
|
||||
"These could also be matched with a glob pattern like: 'happycloud.*'", possibleBuildNames),
|
||||
})
|
||||
}
|
||||
if len(opts.Except) > opts.ExceptMatches {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagWarning,
|
||||
Summary: "an 'except' option was passed, but did not match any build.",
|
||||
Detail: fmt.Sprintf("Possible build names: %v.\n"+
|
||||
"These could also be matched with a glob pattern like: 'happycloud.*'", possibleBuildNames),
|
||||
})
|
||||
}
|
||||
return res, diags
|
||||
}
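The gist of the new `OnlyMatches`/`ExceptMatches` counters, as a hedged stdlib-only sketch rather than Packer's own code (the build names and glob below are invented): count how many builds an `-only` filter actually matched, and warn when the answer is "none".

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	buildNames := []string{"amazon-ebs.server", "docker.worker"} // hypothetical builds
	onlyGlobs := []string{"happycloud.*"}                        // -only pattern that matches nothing

	onlyMatches := 0
	for _, name := range buildNames {
		for _, glob := range onlyGlobs {
			if ok, _ := path.Match(glob, name); ok {
				onlyMatches++
			}
		}
	}
	if len(onlyGlobs) > onlyMatches {
		fmt.Printf("warning: an 'only' option was passed, but not all matches were found. Possible build names: %v\n", buildNames)
	}
}
```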
|
||||
|
||||
|
@ -320,7 +320,11 @@ func (c *PluginConfig) DiscoverMultiPlugin(pluginName, pluginPath string) error
|
||||
|
||||
for _, datasourceName := range desc.Datasources {
|
||||
datasourceName := datasourceName // copy to avoid pointer overwrite issue
|
||||
c.DataSources.Set(pluginPrefix+datasourceName, func() (packersdk.Datasource, error) {
|
||||
key := pluginPrefix + datasourceName
|
||||
if datasourceName == pluginsdk.DEFAULT_NAME {
|
||||
key = pluginName
|
||||
}
|
||||
c.DataSources.Set(key, func() (packersdk.Datasource, error) {
|
||||
return c.Client(pluginPath, "start", "datasource", datasourceName).Datasource()
|
||||
})
|
||||
}
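The `datasourceName := datasourceName` copy above guards against the classic Go loop-variable capture problem (relevant before Go 1.22, where the range variable is shared across iterations). A stdlib-only illustration, unrelated to Packer's own types:

```go
package main

import "fmt"

func main() {
	names := []string{"image", "secretsmanager", "parameterstore"} // made-up names
	factories := map[string]func() string{}

	for _, name := range names {
		name := name // per-iteration copy, like the fix in the hunk above
		factories[name] = func() string { return name }
	}

	for key, f := range factories {
		// With the copy, each closure reports its own name. Without it
		// (before Go 1.22), every closure would see the last value of `name`.
		fmt.Println(key, "->", f())
	}
}
```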
|
||||
|
@ -12,6 +12,9 @@ type GetBuildsOptions struct {
|
||||
Except, Only []string
|
||||
Debug, Force bool
|
||||
OnError string
|
||||
|
||||
// count only/except match count; so say something when nothing matched.
|
||||
ExceptMatches, OnlyMatches int
|
||||
}
|
||||
|
||||
type BuildGetter interface {
|
||||
|
@ -1,3 +1,3 @@
|
||||
NEXT_PUBLIC_ALGOLIA_APP_ID=YY0FFNI7MF
|
||||
NEXT_PUBLIC_ALGOLIA_INDEX=product_PACKER
|
||||
NEXT_PUBLIC_ALGOLIA_SEARCH_ONLY_API_KEY=5037da4824714676226913c65e961ca0
|
||||
NEXT_PUBLIC_ALGOLIA_SEARCH_ONLY_API_KEY=bae171c6fdab559ddb3e8a5a168332ce
|
||||
|
42
website/components/animated-terminal/index.tsx
Normal file
42
website/components/animated-terminal/index.tsx
Normal file
@ -0,0 +1,42 @@
|
||||
import CommandLineTerminal from '@hashicorp/react-command-line-terminal'
|
||||
import { useEffect, useState } from 'react'
|
||||
|
||||
export default function AnimatedTerminal({
|
||||
lines,
|
||||
frameLength = 1000,
|
||||
paused,
|
||||
loop,
|
||||
}) {
|
||||
// Determine the total number of frames
|
||||
let totalFrames = 0
|
||||
lines.forEach((line) => {
|
||||
let frames = line.frames ? line.frames : 1
|
||||
if (Array.isArray(line.code)) {
|
||||
totalFrames += line.code.length * frames
|
||||
} else {
|
||||
totalFrames += frames
|
||||
}
|
||||
})
|
||||
|
||||
// Set up Animation
|
||||
const [frame, setFrame] = useState(0)
|
||||
useEffect(() => {
|
||||
let interval = setInterval(() => {
|
||||
if (paused) return
|
||||
if (loop) return setFrame((frame) => frame + 1)
|
||||
if (frame + 1 < totalFrames) {
|
||||
setFrame((frame) => frame + 1)
|
||||
}
|
||||
}, frameLength)
|
||||
return () => clearInterval(interval)
|
||||
}, [frame])
|
||||
|
||||
// Reset Frames if our lines change
|
||||
useEffect(() => {
|
||||
setFrame(0)
|
||||
}, [lines])
|
||||
|
||||
const renderedLines = [...lines.slice(0, frame)]
|
||||
|
||||
return <CommandLineTerminal product="packer" lines={renderedLines} />
|
||||
}
|
33
website/components/branded-cta/index.tsx
Normal file
33
website/components/branded-cta/index.tsx
Normal file
@ -0,0 +1,33 @@
|
||||
import s from './style.module.css'
|
||||
import Button from '@hashicorp/react-button'
|
||||
|
||||
export default function BrandedCta({ heading, content, links }) {
|
||||
return (
|
||||
<div className={s.brandedCta}>
|
||||
<div className={`g-grid-container ${s.contentContainer}`}>
|
||||
<h2 className={`g-type-display-2 ${s.heading}`}>{heading}</h2>
|
||||
<div className="content-and-links">
|
||||
<p className={`g-type-body-large ${s.content}`}>{content}</p>
|
||||
<div className={s.links}>
|
||||
{links.map((link, stableIdx) => {
|
||||
return (
|
||||
<Button
|
||||
// eslint-disable-next-line react/no-array-index-key
|
||||
key={stableIdx}
|
||||
linkType={link.type || ''}
|
||||
theme={{
|
||||
variant: stableIdx === 0 ? 'primary' : 'secondary',
|
||||
brand: 'packer',
|
||||
background: 'light',
|
||||
}}
|
||||
title={link.text}
|
||||
url={link.url}
|
||||
/>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
67
website/components/branded-cta/style.module.css
Normal file
67
website/components/branded-cta/style.module.css
Normal file
@ -0,0 +1,67 @@
|
||||
.brandedCta {
|
||||
padding: 88px 0;
|
||||
background-color: var(--packer-secondary);
|
||||
background-image: url('/img/branded-cta/cta-right.svg');
|
||||
background-position: bottom right;
|
||||
background-size: auto 100%;
|
||||
background-repeat: no-repeat;
|
||||
|
||||
@media (--small) {
|
||||
background-position: bottom 0 right -130px;
|
||||
}
|
||||
|
||||
@media (462px <= width < 600px) {
|
||||
background-position: bottom 0 right -260px;
|
||||
}
|
||||
|
||||
@media (width < 462px) {
|
||||
background-position: bottom 0 right -170px;
|
||||
}
|
||||
}
|
||||
|
||||
.contentContainer {
|
||||
@media (width >= 992px) {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
@media (width < 992px) {
|
||||
flex-direction: column;
|
||||
}
|
||||
}
|
||||
|
||||
.heading {
|
||||
color: var(--black);
|
||||
margin-top: 0;
|
||||
|
||||
@media (width >= 992px) {
|
||||
flex-basis: 33.3%;
|
||||
margin: 0;
|
||||
}
|
||||
}
|
||||
|
||||
.content {
|
||||
max-width: 647px;
|
||||
margin: 0;
|
||||
color: var(--gray-2);
|
||||
}
|
||||
|
||||
.content-and-links {
|
||||
@media (width >= 992px) {
|
||||
flex-basis: 66.6%;
|
||||
margin-left: 32px;
|
||||
}
|
||||
|
||||
& .g-type-body-large {
|
||||
max-width: 35em;
|
||||
}
|
||||
}
|
||||
|
||||
.links {
|
||||
margin-top: 40px;
|
||||
margin-bottom: -16px;
|
||||
|
||||
& a {
|
||||
margin-right: 16px;
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
}
|
1
website/components/checklist/check-circle-filled.svg
Normal file
1
website/components/checklist/check-circle-filled.svg
Normal file
@ -0,0 +1 @@
|
||||
<svg width="18" height="18" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M17.25 9A8.25 8.25 0 1 1 .75 9a8.25 8.25 0 0 1 16.5 0zm-11.47.223l1.72 1.72 4.19-4.504A.75.75 0 1 1 12.75 7.5l-4.72 5.034a.748.748 0 0 1-1.06 0l-2.25-2.25a.75.75 0 1 1 1.06-1.06z" fill="#2E71E5"/></svg>
|
After Width: | Height: | Size: 326 B |
5
website/components/checklist/index.jsx
Normal file
5
website/components/checklist/index.jsx
Normal file
@ -0,0 +1,5 @@
|
||||
import s from './style.module.css'
|
||||
|
||||
export default function ChecklistWrapper({ children }) {
|
||||
return <div className={s.root}>{children}</div>
|
||||
}
|
20
website/components/checklist/style.module.css
Normal file
20
website/components/checklist/style.module.css
Normal file
@ -0,0 +1,20 @@
|
||||
.root {
|
||||
& ul {
|
||||
list-style: none;
|
||||
margin-left: 2rem;
|
||||
|
||||
& li {
|
||||
color: #525252;
|
||||
|
||||
&::before {
|
||||
content: '';
|
||||
display: block;
|
||||
width: 18px;
|
||||
margin-left: calc(-1 * 1.6rem);
|
||||
height: 18px;
|
||||
top: 7px;
|
||||
background: url('./check-circle-filled.svg');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
48
website/components/homepage-hero/index.tsx
Normal file
48
website/components/homepage-hero/index.tsx
Normal file
@ -0,0 +1,48 @@
|
||||
import Alert from '@hashicorp/react-alert'
|
||||
import Button from '@hashicorp/react-button'
|
||||
import s from './style.module.css'
|
||||
|
||||
export default function HomepageHero({
|
||||
heading,
|
||||
heroFeature,
|
||||
subheading,
|
||||
links,
|
||||
alert,
|
||||
}) {
|
||||
return (
|
||||
<div className={s.homepageHero}>
|
||||
<div className={s.gridContainer}>
|
||||
<div className={s.content}>
|
||||
{alert ? (
|
||||
<Alert
|
||||
url={alert.url}
|
||||
tag={alert.tag}
|
||||
product="packer"
|
||||
text={alert.text}
|
||||
textColor="dark"
|
||||
/>
|
||||
) : null}
|
||||
<h1 className={s.heading}>{heading}</h1>
|
||||
<p className={s.subheading}>{subheading}</p>
|
||||
<div className={s.links}>
|
||||
{links.map((link, index) => (
|
||||
<Button
|
||||
key={link.text}
|
||||
title={link.text}
|
||||
linkType={link.type}
|
||||
url={link.url}
|
||||
theme={{
|
||||
variant: index === 0 ? 'primary' : 'secondary',
|
||||
brand: index === 0 ? 'packer' : 'neutral',
|
||||
}}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
<div className={s.heroFeature}>
|
||||
<div className={s.heroFeatureFrame}>{heroFeature}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
90
website/components/homepage-hero/style.module.css
Normal file
90
website/components/homepage-hero/style.module.css
Normal file
@ -0,0 +1,90 @@
|
||||
.homepageHero {
|
||||
min-height: min(45vw, 600px);
|
||||
padding: 64px 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: center;
|
||||
background-color: #f0fbff;
|
||||
background-image: url(/img/homepage-hero/hero-right.svg);
|
||||
background-position: bottom -250px right;
|
||||
background-repeat: no-repeat;
|
||||
background-size: contain;
|
||||
@media (--medium-up) {
|
||||
background-position: top right;
|
||||
}
|
||||
}
|
||||
|
||||
.gridContainer {
|
||||
composes: g-grid-container from global;
|
||||
position: relative;
|
||||
@media (--large) {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
align-items: center;
|
||||
width: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
.content {
|
||||
max-width: 350px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
text-align: center;
|
||||
color: var(--gray-2);
|
||||
@media (--medium-up) {
|
||||
max-width: 550px;
|
||||
}
|
||||
@media (--large) {
|
||||
align-items: flex-start;
|
||||
text-align: left;
|
||||
width: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
.heading {
|
||||
composes: g-type-display-1 from global;
|
||||
margin-top: 12px;
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
|
||||
.heroFeature {
|
||||
display: none;
|
||||
position: relative;
|
||||
@media (--large) {
|
||||
display: flex;
|
||||
min-height: 500px;
|
||||
}
|
||||
}
|
||||
|
||||
.heroFeatureFrame {
|
||||
position: absolute;
|
||||
width: calc(100% + 80px);
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.links {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
flex-wrap: wrap;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
width: 100%;
|
||||
|
||||
& a {
|
||||
margin-right: 8px;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
@media (--large) {
|
||||
width: auto;
|
||||
justify-content: flex-start;
|
||||
}
|
||||
}
|
||||
|
||||
.subheading {
|
||||
composes: g-type-body-large from global;
|
||||
color: inherit;
|
||||
margin: 0;
|
||||
font-size: 17px;
|
||||
line-height: 25px;
|
||||
margin-bottom: 40px;
|
||||
}
|
21
website/components/integrations-text-split/index.tsx
Normal file
21
website/components/integrations-text-split/index.tsx
Normal file
@ -0,0 +1,21 @@
|
||||
import TextSplitWithImage from '@hashicorp/react-text-split-with-image'
|
||||
|
||||
export default function IntegrationsTextSplit({
|
||||
heading,
|
||||
links,
|
||||
content,
|
||||
image,
|
||||
}) {
|
||||
return (
|
||||
<TextSplitWithImage
|
||||
textSplit={{
|
||||
heading,
|
||||
product: 'packer',
|
||||
content,
|
||||
linkStyle: 'buttons',
|
||||
links,
|
||||
}}
|
||||
image={image}
|
||||
/>
|
||||
)
|
||||
}
|
21
website/components/section-break-cta/index.tsx
Normal file
21
website/components/section-break-cta/index.tsx
Normal file
@ -0,0 +1,21 @@
|
||||
import Button from '@hashicorp/react-button'
|
||||
import s from './style.module.css'
|
||||
|
||||
export default function SectionBreakCta({ heading, link }) {
|
||||
return (
|
||||
<div className={s.sectionBreakCta}>
|
||||
<hr />
|
||||
<h4 className={s.heading}>{heading}</h4>
|
||||
<Button
|
||||
title={link.text}
|
||||
url={link.url}
|
||||
theme={{
|
||||
brand: 'neutral',
|
||||
variant: 'tertiary-neutral',
|
||||
background: 'light',
|
||||
}}
|
||||
linkType="outbound"
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
}
|
33
website/components/section-break-cta/style.module.css
Normal file
33
website/components/section-break-cta/style.module.css
Normal file
@ -0,0 +1,33 @@
|
||||
.sectionBreakCta {
|
||||
padding: 88px 24px;
|
||||
max-width: 800px;
|
||||
display: grid;
|
||||
grid-gap: 24px;
|
||||
text-align: center;
|
||||
position: relative;
|
||||
box-shadow: 0 8px 12px rgba(37, 38, 45, 0.08);
|
||||
margin: 0 16px;
|
||||
background-color: var(--white);
|
||||
align-items: center;
|
||||
|
||||
@media (--medium-up) {
|
||||
margin: 0 auto 104px auto;
|
||||
}
|
||||
& hr {
|
||||
position: absolute;
|
||||
top: 56px;
|
||||
left: calc(50% - 32px);
|
||||
width: 64px;
|
||||
background-color: var(--packer);
|
||||
margin-top: 0;
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
}
|
||||
|
||||
.heading {
|
||||
composes: g-type-display-4 from global;
|
||||
margin: 0;
|
||||
@media (--medium-up) {
|
||||
padding: 0 88px;
|
||||
}
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
---
|
||||
description: The 1&1 builder is able to create images for 1&1 cloud.
|
||||
page_title: 1&1 - Builders
|
||||
---
|
||||
|
||||
# 1&1 Builder
|
||||
|
||||
Type: `oneandone`
|
||||
Artifact BuilderId: `packer.oneandone`
|
||||
|
||||
The 1&1 Builder is able to create virtual machines for
|
||||
[1&1](https://www.1and1.com/).
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
There are many configuration options available for the builder. They are
|
||||
segmented below into two categories: required and optional parameters. Within
|
||||
each category, the available configuration keys are alphabetized.
|
||||
|
||||
In addition to the options listed here, a
|
||||
[communicator](/docs/templates/legacy_json_templates/communicator) can be configured for this
|
||||
builder. In addition to the options defined there, a private key file
|
||||
can also be supplied to override the typical auto-generated key:
|
||||
|
||||
@include 'packer-plugin-sdk/communicator/SSH-Private-Key-File-not-required.mdx'
|
||||
|
||||
### Required
|
||||
|
||||
- `source_image_name` (string) - 1&1 Server Appliance name of type `IMAGE`.
|
||||
|
||||
- `token` (string) - 1&1 REST API Token. This can be specified via
|
||||
environment variable `ONEANDONE_TOKEN`
|
||||
|
||||
### Optional
|
||||
|
||||
- `data_center_name` - Name of virtual data center. Possible values "ES",
|
||||
"US", "GB", "DE". Default value "US"
|
||||
|
||||
- `disk_size` (string) - Amount of disk space for this image in GB. Defaults
|
||||
to "50"
|
||||
|
||||
- `image_name` (string) - Resulting image. If "image_name" is not provided
|
||||
Packer will generate it
|
||||
|
||||
- `retries` (number) - Number of retries Packer will make status requests
|
||||
while waiting for the build to complete. Default value "600".
|
||||
|
||||
<!-- markdown-link-check-disable -->
|
||||
|
||||
- `url` (string) - Endpoint for the 1&1 REST API. Default URL
|
||||
"<https://cloudpanel-api.1and1.com/v1>"
|
||||
<!-- markdown-link-check-enable -->
|
||||
|
||||
## Example
|
||||
|
||||
Here is a basic example:
|
||||
|
||||
```json
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "oneandone",
|
||||
"disk_size": "50",
|
||||
"image_name": "test5",
|
||||
"source_image_name": "ubuntu1604-64min",
|
||||
"ssh_username": "root",
|
||||
"ssh_private_key_file": "/path/to/private/ssh/key"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
@ -98,9 +98,11 @@ working on improving this part of the transformer.
|
||||
|
||||
## Options
|
||||
|
||||
- `-output-file` - File where to put the hcl2 generated config. Defaults to
|
||||
JSON_TEMPLATE.pkr.hcl
|
||||
- `-with-annotations` - Adds helper annotations with information about the generated HCL2 blocks.
|
||||
- `-output-file` - Filename of the hcl2 generated template. Defaults to
|
||||
JSON_TEMPLATE.pkr.hcl; for example, if the file is called
|
||||
"packerparty.json", the default output-file is "packerparty.json.pkr.hcl".
|
||||
- `-with-annotations` - Adds helpful comments to the HCL template with
|
||||
information about the generated HCL2 blocks.
|
||||
|
||||
## User variables using other user variables
|
||||
|
||||
@ -128,3 +130,10 @@ locals {
|
||||
baz = "${var.foo} ${var.bar}"
|
||||
}
|
||||
```
|
||||
|
||||
## Upgrading templates that use third-party community plugins
|
||||
|
||||
If your template references a plugin that is not bundled with the main Packer
|
||||
binary, you need to make sure that the [plugin is installed](/docs/plugins#installing-plugins)
|
||||
or you will get an `unknown builder type` error. Packer needs to load the plugin
|
||||
to transpose the template.
|
||||
|
149
website/content/docs/plugins/packer-integration-program.mdx
Normal file
149
website/content/docs/plugins/packer-integration-program.mdx
Normal file
@ -0,0 +1,149 @@
|
||||
---
|
||||
description: |
|
||||
The HashiCorp Packer Integration Program allows vendors to integrate
|
||||
their products to work with Packer.
|
||||
page_title: Integration Program
|
||||
---
|
||||
|
||||
# Packer Integration Program
|
||||
|
||||
The HashiCorp Packer Integration Program allows vendors to integrate their products to work with Packer.
|
||||
|
||||
Vendors integrating their solutions via the Packer Integration Process provide their customers a verified and seamless user experience. The Packer Integration Program currently only supports coding with the Go programming language.
|
||||
|
||||
This program is intended to be largely a self-service process with links and guidance to information sources, clearly defined steps, and checkpoints.
|
||||
|
||||
### Types of Packer Integrations
|
||||
|
||||
Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration.
|
||||
Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. Packer does not replace configuration management like Chef or Puppet. In fact, when building images, Packer is able to use tools like Chef or Puppet to install software onto the image.
|
||||
|
||||
A machine image is a single static unit that contains a pre-configured operating system and installed software which is used to quickly create new running machines. Machine image formats change for each platform. Some examples include AMIs for EC2, VMDK/VMX files for VMware, OVF exports for VirtualBox, etc. To learn more about Packer use cases, see [Use Cases - Introduction | Packer by HashiCorp](/#features).
|
||||
|
||||
The diagram below depicts the key Packer integration categories and types.
|
||||
|
||||
<figure style={{ margin: '3rem 0', textAlign: 'center' }}>
|
||||
<img
|
||||
src="/img/docs/packer-ecosystem-diagram.png"
|
||||
alt="Packer Ecosystem Integration Diagram"
|
||||
style={{ margin: '1rem auto', width: '460px', display: 'block' }}
|
||||
/>
|
||||
<figcaption class="g-type-body-small" style={{ color: '#6d6d6d' }}>
|
||||
Packer Ecosystem Integration Diagram
|
||||
</figcaption>
|
||||
</figure>
|
||||
|
||||
Main Packer categories for partners to integrate with include:
|
||||
|
||||
- **Data Sources**
|
||||
- Data Sources allow users to retrieve values from a remote API and store them as variables in the Packer configuration template. An example is the [AWS secrets manager](/docs/datasources/amazon/secretsmanager) data source.
|
||||
- **Builders**
|
||||
- Builders manage the VM lifecycle. They manage launching a vm/instance, running provisioners against that instance, shutting the instance down, and saving an artifact image from that instance. Your builder handles all of the setup and cleanup costs associated with creating the output image artifact.
|
||||
- **Provisioners**
|
||||
- Provisioners are run against the instance launched by the builder. They generally provide an interface for interacting with a particular provisioning tool, such as Ansible, Puppet, or Chef.
|
||||
- **Post-Processors**
|
||||
- Post-Processors manage the image artifact after it has been created. This can be something general like running a compression tool against the artifact, or something specific like uploading it to a particular cloud service.
|
||||
|
||||
### Development Process
|
||||
|
||||
The Packer integration development process is divided into six steps. By following these steps, Packer integrations can be developed alongside HashiCorp to ensure that the integrations are able to be verified and supported in Packer as quickly as possible. A visual representation of the self-guided steps is depicted below.
|
||||
|
||||

|
||||
|
||||
The individual Packer integration steps include:
|
||||
|
||||
1. Engage: Initial contact between vendor and HashiCorp
|
||||
1. Enable: Information and articles to aid with the development of the integration
|
||||
1. Dev/Test: Integration development and test process
|
||||
1. Review: HashiCorp code review and verification of integration (iterative process)
|
||||
1. Release: Verified integration made available and listed on the HashiCorp website once the HashiCorp technology partnership agreement has been executed
|
||||
1. Support: Ongoing maintenance and support of the integration by the vendor.
|
||||
|
||||
#### 1. Engage
|
||||
|
||||
Please begin by providing some basic information about the integration that is being built via a simple [webform](https://docs.google.com/forms/d/e/1FAIpQLSfgq3HJ9Rfsi7LgPLFln28ZrmarATGlD_6A47-Io-bPUftKUw/viewform)
|
||||
|
||||
This information is recorded and used by HashiCorp to track the integration through various stages. The information is also used to notify the integration developer of any overlapping work, perhaps coming from the community so you may better focus resources.
|
||||
|
||||
Packer has an active community and ecosystem of partners that may have already started working on a similar integration. We'll do our best to connect similar parties to avoid duplicate work.
|
||||
|
||||
#### 2. Enable
|
||||
|
||||
While not mandatory, HashiCorp encourages vendors to sign an MNDA (Mutual Non-Disclosure Agreement) to allow for open dialog and sharing of ideas during the integration process.
|
||||
|
||||
In an effort to support our self-serve model we’ve included links to resources, documentation, examples and best practices to guide you through the Packer integration development and testing process.
|
||||
|
||||
- [Writing vendor extension guide](/docs/plugins/creation)
|
||||
- Sample development implemented by a [partner](https://github.com/exoscale/packer-plugin-exoscale)
|
||||
- [Scaffolding plugin repository](https://github.com/hashicorp/packer-plugin-scaffolding) to help bootstrap a new contribution:
|
||||
- Contributing to Packer [guidelines](https://github.com/hashicorp/packer/blob/master/.github/CONTRIBUTING.md)
|
||||
- [Packer developer community forum](https://discuss.hashicorp.com/c/packer/23)
|
||||
- [Packer's source code](https://github.com/hashicorp/packer)
|
||||
|
||||
We encourage vendors to closely follow the above guidance. Adopting the same structure and coding patterns helps expedite the review and release cycles.
|
||||
|
||||
#### 3. Dev & Test
|
||||
|
||||
Packer requires all code-level integrations to be written in the [Go](https://golang.org/) programming language and contain an [MPL-2.0](https://en.wikipedia.org/wiki/Mozilla_Public_License) open source license. The only knowledge necessary to write a plugin is basic command-line skills and knowledge of the Go programming language. HashiCorp has found the development process to be straightforward when vendors pay close attention to these resources; adopting the same structure and coding patterns helps expedite the review and release cycles. Please remember that every major integration step should include acceptance tests and the appropriate documentation.
|
||||
|
||||
Data Sources
|
||||
- [Custom Data Sources documentation](/docs/plugins/creation/custom-datasources)
|
||||
- [Example Data Source](https://github.com/hashicorp/packer-plugin-hashicups/tree/main/datasource)
|
||||
|
||||
Builders
|
||||
- [Custom Builders documentation](/docs/plugins/creation/custom-builders)
|
||||
- [Example Builder](https://github.com/hashicorp/packer-plugin-hashicups/tree/main/builder/order)
|
||||
|
||||
Provisioners
|
||||
- [Custom Provisioners documentation](/docs/plugins/creation/custom-provisioners)
|
||||
- [Example Provisioner](https://github.com/hashicorp/packer-plugin-hashicups/tree/main/provisioner/toppings)
|
||||
|
||||
Post-Processors
|
||||
- [Custom Post-Processors documentation](/docs/plugins/creation/custom-post-processors)
|
||||
- [Example Post-Processor](https://github.com/hashicorp/packer-plugin-hashicups/tree/main/post-processor/receipt)
|
||||
|
||||
Packer-Plugin-SDK
|
||||
- The [Packer-plugin SDK](https://github.com/hashicorp/packer-plugin-sdk) contains tools to help plugin developers with common needs, like handling SSH connections or basic plugin architecture.
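For orientation only, here is a rough sketch of the plugin entry point that the scaffolding repository above encourages, paired with a deliberately empty builder. Treat the exact SDK calls and interface shape as illustrative rather than authoritative; the scaffolding repository is the reference.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/hashicorp/hcl/v2/hcldec"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer-plugin-sdk/plugin"
)

// Builder is a no-op builder used only to show how registration works.
type Builder struct{}

func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return hcldec.ObjectSpec{} }

func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
	return nil, nil, nil
}

func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {
	ui.Say("nothing to build in this sketch")
	return nil, nil
}

func main() {
	// Register the builder under the plugin's default name and serve it.
	pps := plugin.NewSet()
	pps.RegisterBuilder(plugin.DEFAULT_NAME, new(Builder))
	if err := pps.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
}
```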
|
||||
|
||||
#### 4. Review
|
||||
|
||||
During the review process, HashiCorp will provide feedback on the newly developed integration. This is an important step to allow HashiCorp to review and verify your Packer integration. Please send the integration code and other relevant logs for verification to: [Packer-integration-dev@hashicorp.com](mailto:packer-integration-dev@hashicorp.com).
|
||||
|
||||
In order to document your plugins with Packer, please submit a GitHub pull request (PR) against the [Packer project](https://github.com/hashicorp/packer). See [Registering Plugin Documentation](/docs/plugins/creation#registering-plugin-documentation) for instructions on how to register your remote plugin documentation with Packer.
|
||||
The review process can take a while to complete and may require some iterations through the code to address any problems identified by the HashiCorp team.
|
||||
|
||||
#### 5. Release
|
||||
|
||||
At this stage, it is expected that the integration is fully complete, the necessary documentation has been written, the acceptance tests have all passed, and that HashiCorp has reviewed the integration. Once the plugin has been validated and accepted by HashiCorp, the plugin can be hosted on GitHub so it can easily be [downloaded then installed within Packer](/docs/plugins/creation#creating-a-github-release).
|
||||
|
||||
Once the integration has been released the vendor is requested to sign the HashiCorp Technology Partner Agreement so that we can have their integration be listed on the HashiCorp website.
|
||||
|
||||
#### 6. Support
|
||||
|
||||
Many vendors view the release step to be the end of the journey, while at HashiCorp we view it to be the beginning of the journey. Getting the integration built is just the first step in enabling users to leverage it against their infrastructure. Once development is completed, on-going effort is required to support the developed integration, maintain the plugin and address any issues in a timely manner.
|
||||
|
||||
The expectation from the vendor/partner is to create a mechanism for them to track and resolve all issues on an ongoing basis. Vendors who choose to not support their integration will not be considered a verified integration and cannot be listed on the website.
|
||||
|
||||
### Checklist
|
||||
|
||||
Below is an ordered checklist of steps that should be followed during the integration process. This just reiterates the steps already documented in the sections above.
|
||||
|
||||
<Checklist>
|
||||
|
||||
- Fill out the Packer integration [webform](https://docs.google.com/forms/d/e/1FAIpQLSfgq3HJ9Rfsi7LgPLFln28ZrmarATGlD_6A47-Io-bPUftKUw/viewform)
|
||||
- Execute the HashiCorp MNDA (Mutual Non-Disclosure Agreement) if needed
|
||||
- Develop and test Packer integration along with the acceptance tests and documentation
|
||||
- Send email to [packer-integration-dev@hashicorp.com](mailto:packer-integration-dev@hashicorp.com) to schedule an initial review
|
||||
- Address review feedback and finalize the development process
|
||||
- Provide HashiCorp with credentials for underlying infrastructure for test purposes
|
||||
- Demo the integration and/or send the test logs to HashiCorp at: [packer-integration-dev@hashicorp.com](mailto:packer-integration-dev@hashicorp.com)
|
||||
- Execute HashiCorp Partner Agreement Documents, review logo guidelines, partner listing and more
|
||||
- Plan to continue supporting the integration with additional functionality and responding to customer issues.
|
||||
|
||||
</Checklist>
|
||||
|
||||
### Contact Us
|
||||
|
||||
For any questions or feedback, please contact us at: packer-integration-dev@hashicorp.com.
|
@ -12,7 +12,7 @@ The top-level `source` block defines reusable builder configuration blocks:
|
||||
|
||||
`@include 'from-1.5/sources/example-block.mdx'`
|
||||
|
||||
You can start builders by refering to those source blocks form a [`build`
|
||||
You can start builders by referring to those source blocks from a [`build`
|
||||
block](/docs/templates/hcl_templates/blocks/build), for example :
|
||||
|
||||
```hcl
|
||||
|
@ -86,7 +86,7 @@ source "amazon-ebs" "server" {
|
||||
}
|
||||
```
|
||||
|
||||
## Description
|
||||
## Single `local` block
|
||||
|
||||
The `local` block defines exactly one local variable within a folder. The block
|
||||
label is the name of the local, and the "expression" is the expression that
|
||||
@ -94,6 +94,22 @@ should be evaluated to create the local. Using this block, you can optionally
|
||||
supply a "sensitive" boolean to mark the variable as sensitive and filter it
|
||||
from logs.
|
||||
|
||||
```hcl
|
||||
local "mylocal" {
|
||||
expression = "${var.secret_api_key}"
|
||||
sensitive = true
|
||||
}
|
||||
```
|
||||
|
||||
This block is also very useful for defining complex locals. Packer might take some time to expand and evaluate `locals`
|
||||
with complex expressions dependent on other locals. The `locals` block is read as a map. Maps are not sorted, and therefore
|
||||
the evaluation time is not deterministic.
|
||||
|
||||
To avoid that, singular `local` blocks should be used instead. These will be
|
||||
evaluated in the order they are defined, and the evaluation order and time will always be the same.
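The non-determinism comes from the block being decoded into a Go map, and Go deliberately randomizes map iteration order. A tiny, self-contained illustration of that underlying behavior (the keys below are invented):

```go
package main

import "fmt"

func main() {
	locals := map[string]string{ // stand-in for a decoded `locals` block
		"foo": "a",
		"bar": "b",
		"baz": "${foo} ${bar}",
	}
	// Run this a few times: the visit order can change between runs,
	// which is why interdependent locals may need several evaluation passes.
	for name := range locals {
		fmt.Println("evaluating", name)
	}
}
```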
|
||||
|
||||
## `locals` block
|
||||
|
||||
The `locals` block defines one or more local variables within a folder.
|
||||
|
||||
The names given for the items in the `locals` block must be unique throughout a
|
||||
|
@ -125,14 +125,15 @@ Here is a full list of the available functions for reference.
|
||||
`strftime FORMAT` - UTC time, formatted using the ISO C standard format
|
||||
`FORMAT`. See
|
||||
[jehiah/go-strftime](https://github.com/jehiah/go-strftime) for a list
|
||||
of available format specifier.
|
||||
of available format specifiers.
|
||||
|
||||
Please note that if you are using a large number of builders,
|
||||
provisioners or post-processors, the isotime may be slightly
|
||||
different for each one because it is from when the plugin is
|
||||
launched not the initial Packer process. In order to avoid this and make
|
||||
the timestamp consistent across all plugins, set it as a user variable
|
||||
and then access the user variable within your plugins.
|
||||
provisioners or post-processors, using the isotime engine directly in the
|
||||
plugin configuration may cause the timestamp to be slightly different for
|
||||
each plugin. This is because the timestamp is generated when each plugin is
|
||||
launched rather than in the initial Packer process. In order to avoid this
|
||||
and make sure the timestamp is consistent across all plugins, set it as a user
|
||||
variable and then access the user variable within your plugins.
|
||||
|
||||
- `lower` - Lowercases the string.
|
||||
- `packer_version` - Returns Packer version.
|
||||
@ -235,6 +236,56 @@ documentation for more information on user variables.
|
||||
|
||||
# isotime Function Format Reference
|
||||
|
||||
The isotime template engine uses golang to generate timestamps. If you're
|
||||
unfamiliar with golang, then the way you format the timestamp is going to
|
||||
feel a bit unusual compared to how you may be used to formatting
|
||||
datetime strings.
|
||||
|
||||
Full docs and examples for the golang time formatting function can be found
|
||||
[here](https://golang.org/pkg/time/#example_Time_Format)
|
||||
|
||||
However, the formatting basics are worth describing here. From the [golang docs](https://golang.org/pkg/time/#pkg-constants):
|
||||
|
||||
> These are predefined layouts for use in Time.Format and time.Parse. The
|
||||
> reference time used in the layouts is the specific time:
|
||||
>
|
||||
> Mon Jan 2 15:04:05 MST 2006
|
||||
>
|
||||
> which is Unix time 1136239445. Since MST is GMT-0700, the reference time
|
||||
> can be thought of as
|
||||
>
|
||||
> 01/02 03:04:05PM '06 -0700
|
||||
>
|
||||
> To define your own format, write down what the reference time would look like
|
||||
> formatted your way; see the values of constants like ANSIC, StampMicro or
|
||||
> Kitchen for examples. The model is to demonstrate what the reference time
|
||||
> looks like so that the Format and Parse methods can apply the same
|
||||
> transformation to a general time value.
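Since `isotime` delegates to Go's `time.Format`, the reference layout can be tried directly in Go; a minimal sketch using an arbitrary fixed timestamp:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Arbitrary fixed moment, close to the sample outputs further below.
	t := time.Date(2021, time.May, 17, 23, 40, 16, 0, time.UTC)

	fmt.Println(t.Format("2006-01-02"))          // 2021-05-17
	fmt.Println(t.Format("Jan-_2-15:04:05.000")) // May-17-23:40:16.000
	fmt.Println(t.Format("3:04PM"))              // 11:40PM
}
```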
|
||||
|
||||
So what does that look like in a Packer template function? Here's an example
|
||||
of how you'd declare a variable using the isotime function.
|
||||
|
||||
```json
|
||||
"variables": {
|
||||
"myvar": "packer-{{isotime `2006-01-02 03:04:05`}}"
|
||||
}
|
||||
```
|
||||
|
||||
You can try and modify the following examples in a packer template or in
|
||||
`packer console` to get an idea of how to set different timestamps:
|
||||
|
||||
| Input | Output |
|
||||
| ------------------------------------------ | ----------- |
|
||||
| ``"packer-{{isotime `2006-01-02`}}"`` | "packer-2021-05-17 11:40:16" |
|
||||
| ``"packer-{{isotime `Jan-_2-15:04:05.000`}}"`` | "packer-May-17-23:40:16.786" |
|
||||
| ``"packer-{{isotime `3:04PM`}}"`` | "packer-11:40PM" |
|
||||
| ``"{{ isotime }}"`` | "June 7, 7:22:43pm 2014" |
|
||||
| ``"{{isotime `2006-01-02`}}"`` | "2014-06-07" |
|
||||
| ``"{{isotime `Mon 1504`}}"`` | "Sat 1922" |
|
||||
| ``"{{isotime `02-Jan-06 03\_04\_05`}}"`` | "07-Jun-2014 07\_22\_43" |
|
||||
| ``"{{isotime `Hour15Year200603`}}"`` | "Hour19Year201407" |
|
||||
|
||||
|
||||
Formatting for the function `isotime` uses the magic reference date **Mon Jan 2
|
||||
15:04:05 -0700 MST 2006**, which breaks down to the following:
|
||||
|
||||
@ -278,47 +329,9 @@ Formatting for the function `isotime` uses the magic reference date **Mon Jan 2
|
||||
|
||||
_The values in parentheses are the abbreviated, or 24-hour clock values_
|
||||
|
||||
For those unfamiliar with GO date/time formatting, here is a link to the
|
||||
documentation: [go date/time formatting](https://programming.guide/go/format-parse-string-time-date-example.html)
|
||||
|
||||
Note that "-0700" is always formatted into "+0000" because `isotime` is always
|
||||
UTC time.
|
||||
|
||||
Here are some example formatted time, using the above format options:
|
||||
|
||||
```liquid
|
||||
isotime = June 7, 7:22:43pm 2014
|
||||
|
||||
{{isotime "2006-01-02"}} = 2014-06-07
|
||||
{{isotime "Mon 1504"}} = Sat 1922
|
||||
{{isotime "02-Jan-06 03\_04\_05"}} = 07-Jun-2014 07\_22\_43
|
||||
{{isotime "Hour15Year200603"}} = Hour19Year201407
|
||||
```
|
||||
|
||||
Please note that double quote characters need escaping inside of templates (in
|
||||
this case, on the `ami_name` value):
|
||||
|
||||
```json
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "amazon-ebs",
|
||||
"access_key": "...",
|
||||
"secret_key": "...",
|
||||
"region": "us-east-1",
|
||||
"source_ami": "ami-fce3c696",
|
||||
"instance_type": "t2.micro",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer {{isotime \"2006-01-02\"}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
-> **Note:** See the [Amazon builder](/docs/builders/amazon)
|
||||
documentation for more information on how to correctly configure the Amazon
|
||||
builder in this example.
|
||||
|
||||
# split Function Format Reference
|
||||
|
||||
The function `split` takes an input string, a separator string, and a numeric
|
||||
|
@ -79,6 +79,74 @@ key with an underscore. Example:
|
||||
**Important:** Only _root level_ keys can be underscore prefixed. Keys within
|
||||
builders, provisioners, etc. will still result in validation errors.
|
||||
|
||||
-> **Note:** Packer supports HCL2 from version 1.6.0. The Hashicorp
|
||||
Configuration Language does support comments anywhere in template files.
|
||||
If comments are important to you, consider upgrading your
|
||||
JSON template to HCL2 using the `packer hcl2_upgrade` command.
|
||||
|
||||
One workaround if you are not ready to upgrade to HCL is to use jq to strip
|
||||
unsupported comments from a Packer template before you run `packer build`.
|
||||
|
||||
For example, here is a file named `commented_template.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"_comment": ["this is", "a multi-line", "comment"],
|
||||
"builders": [
|
||||
{
|
||||
"_comment": "this is a comment inside a builder",
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"_comment": "this is a root level comment",
|
||||
"provisioners": [
|
||||
{
|
||||
"_comment": "this is a different comment",
|
||||
"type": "shell",
|
||||
"_comment": "this is yet another comment",
|
||||
"inline": ["echo hellooooo"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
If you use the following jq command:
|
||||
|
||||
```shell-session
|
||||
$ jq 'walk(if type == "object" then del(._comment) else . end)' commented_template.json > uncommented_template.json
|
||||
```
|
||||
|
||||
The tool will produce a new file containing:
|
||||
|
||||
```json
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": ["echo hellooooo"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Once you've got your uncommented file, you can call `packer build` on it like
|
||||
you normally would.
|
||||
|
||||
If your install of jq does not have the walk function and you get an error like
|
||||
|
||||
```text
|
||||
jq: error: walk/1 is not defined at <top-level>,
|
||||
```
|
||||
|
||||
You can create a file `~/.jq` and add the [walk function](https://github.com/stedolan/jq/blob/ad9fc9f559e78a764aac20f669f23cdd020cd943/src/builtin.jq#L255-L262) to it by hand.
|
||||
|
||||
## Example Template
|
||||
|
||||
Below is an example of a basic template that could be invoked with
|
||||
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
page_title: Tips and Tricks
|
||||
description: |-
|
||||
The guides stored in this section are miscellanious tips and tricks that might
|
||||
make your experience of using Packer easier
|
||||
---
|
||||
|
||||
# Tips and Tricks
|
||||
|
||||
Click the sidebar navigation to check out the miscellaneous guides we have for
|
||||
making your life with Packer just a bit easier.
|
@ -1,84 +0,0 @@
|
||||
---
|
||||
page_title: Using the isotime template function - Guides
|
||||
description: |-
|
||||
It can be a bit confusing to figure out how to format your isotime using the
|
||||
golang reference date string. Here is a small guide and some examples.
|
||||
---
|
||||
|
||||
# Using the Isotime template function with a format string
|
||||
|
||||
The way you format isotime in golang is a bit nontraditional compared to how
|
||||
you may be used to formatting datetime strings.
|
||||
|
||||
Full docs and examples for the golang time formatting function can be found
|
||||
[here](https://golang.org/pkg/time/#example_Time_Format)
|
||||
|
||||
However, the formatting basics are worth describing here. From the [golang docs](https://golang.org/pkg/time/#pkg-constants):
|
||||
|
||||
> These are predefined layouts for use in Time.Format and time.Parse. The
|
||||
> reference time used in the layouts is the specific time:
|
||||
>
|
||||
> Mon Jan 2 15:04:05 MST 2006
|
||||
>
|
||||
> which is Unix time 1136239445. Since MST is GMT-0700, the reference time
|
||||
> can be thought of as
|
||||
>
|
||||
> 01/02 03:04:05PM '06 -0700
|
||||
>
|
||||
> To define your own format, write down what the reference time would look like
|
||||
> formatted your way; see the values of constants like ANSIC, StampMicro or
|
||||
> Kitchen for examples. The model is to demonstrate what the reference time
|
||||
> looks like so that the Format and Parse methods can apply the same
|
||||
> transformation to a general time value.
|
||||
|
||||
So what does that look like in a Packer template function?
|
||||
|
||||
```json
|
||||
{
|
||||
"variables": {
|
||||
"myvar": "packer-{{isotime \"2006-01-02 03:04:05\"}}"
|
||||
},
|
||||
"builders": [
|
||||
{
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell-local",
|
||||
"inline": ["echo {{ user `myvar`}}"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
You can switch out the variables section above with the following examples to
|
||||
get different timestamps:
|
||||
|
||||
Date only, not time:
|
||||
|
||||
```json
|
||||
"variables":
|
||||
{
|
||||
"myvar": "packer-{{isotime \"2006-01-02\"}}"
|
||||
}
|
||||
```
|
||||
|
||||
A timestamp down to the millisecond:
|
||||
|
||||
```json
|
||||
"variables":
|
||||
{
|
||||
"myvar": "packer-{{isotime \"Jan-_2-15:04:05.000\"}}"
|
||||
}
|
||||
```
|
||||
|
||||
Or just the time as it would appear on a digital clock:
|
||||
|
||||
```json
|
||||
"variables":
|
||||
{
|
||||
"myvar": "packer-{{isotime \"3:04PM\"}}"
|
||||
}
|
||||
```
|
@ -1,75 +0,0 @@
|
||||
---
|
||||
page_title: Use jq and Packer to comment your templates - Guides
|
||||
description: |-
|
||||
You can add detailed comments beyond the root-level underscore-prefixed field
|
||||
supported by Packer, and remove them using jq.
|
||||
---
|
||||
|
||||
# How to use jq to strip unsupported comments from a Packer template
|
||||
|
||||
-> **Note:** Packer supports HCL2 from version 1.6.0, the Hashicorp Configuration
|
||||
Language allows to comment directly in template files. Consider upgrading your
|
||||
JSON template to HCL2 using the `packer hcl2_upgrade` command.
|
||||
|
||||
One of the biggest complaints we get about Packer is that JSON doesn't use comments.
|
||||
For Packer JSON templates, you can add detailed comments beyond the root-level underscore-prefixed field supported by Packer, and remove them using jq.
|
||||
|
||||
Let's say we have a file named `commented_template.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"_comment": ["this is", "a multi-line", "comment"],
|
||||
"builders": [
|
||||
{
|
||||
"_comment": "this is a comment inside a builder",
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"_comment": "this is a root level comment",
|
||||
"provisioners": [
|
||||
{
|
||||
"_comment": "this is a different comment",
|
||||
"type": "shell",
|
||||
"_comment": "this is yet another comment",
|
||||
"inline": ["echo hellooooo"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```shell-session
|
||||
$ jq 'walk(if type == "object" then del(._comment) else . end)' commented_template.json > uncommented_template.json
|
||||
```
|
||||
|
||||
will produce a new file containing:
|
||||
|
||||
```json
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": ["echo hellooooo"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Once you've got your uncommented file, you can call `packer build` on it like
|
||||
you normally would.
|
||||
|
||||
## The walk function
|
||||
|
||||
If your install of jq does not have the walk function and you get an error like
|
||||
|
||||
```text
|
||||
jq: error: walk/1 is not defined at <top-level>,
|
||||
```
|
||||
|
||||
You can create a file `~/.jq` and add the [walk function](https://github.com/stedolan/jq/blob/ad9fc9f559e78a764aac20f669f23cdd020cd943/src/builtin.jq#L255-L262) to it by hand.
|
@ -1,64 +0,0 @@
|
||||
---
|
||||
page_title: Convert Veewee Definitions to Packer Templates - Guides
|
||||
description: |-
|
||||
If you are or were a user of Veewee, then there is an official tool called
|
||||
veewee-to-packer that will convert your Veewee definition into an equivalent
|
||||
Packer template. Even if you're not a Veewee user, Veewee has a large library
|
||||
of templates that can be readily used with Packer by simply converting them.
|
||||
---
|
||||
|
||||
# Veewee-to-Packer
|
||||
|
||||
If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), then
|
||||
there is an official tool called
|
||||
[veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) that will
|
||||
convert your Veewee definition into an equivalent Packer template. Even if
|
||||
you're not a Veewee user, Veewee has a [large
|
||||
library](https://github.com/jedi4ever/veewee/tree/master/templates) of templates
|
||||
that can be readily used with Packer by simply converting them.
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
Since Veewee itself is a Ruby project, so too is the veewee-to-packer
|
||||
application so that it can read the Veewee configurations. Install it using
|
||||
RubyGems:
|
||||
|
||||
```shell-session
|
||||
$ gem install veewee-to-packer
|
||||
# ...
|
||||
```
|
||||
|
||||
Once installed, just point `veewee-to-packer` at the `definition.rb` file of any
|
||||
template. The converter will output any warnings or messages about the conversion.
|
||||
The example below converts a CentOS template:
|
||||
|
||||
```shell-session
|
||||
$ veewee-to-packer templates/CentOS-6.4/definition.rb
|
||||
Success! Your Veewee definition was converted to a Packer
|
||||
template! The template can be found in the `template.json` file
|
||||
in the output directory: output
|
||||
|
||||
Please be sure to run `packer validate` against the new template
|
||||
to verify settings are correct. Be sure to `cd` into the directory
|
||||
first, since the template has relative paths that expect you to
|
||||
use it from the same working directory.
|
||||
```
|
||||
|
||||
**_Voila!_** By default, `veewee-to-packer` will output a template that contains
|
||||
a builder for both VirtualBox and VMware. You can use the `-only` flag on
|
||||
`packer build` to only build one of them. Otherwise you can use the `--builder`
|
||||
flag on `veewee-to-packer` to only output specific builder configurations.
|
||||
|
||||
## Limitations
|
||||
|
||||
None, really. The tool will tell you if it can't convert a part of a template,
|
||||
and whether that is a critical error or just a warning. Most of Veewee's
|
||||
functions translate perfectly over to Packer. There are still a couple missing
|
||||
features in Packer, but they're minimal.
|
||||
|
||||
## Bugs
|
||||
|
||||
If you find any bugs, please report them to the [veewee-to-packer issue
|
||||
tracker](https://github.com/mitchellh/veewee-to-packer). I haven't been able to
|
||||
exhaustively test every Veewee template, so there are certainly some edge cases
|
||||
out there.
|
@ -1,18 +0,0 @@
|
||||
<!-- Code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; DO NOT EDIT MANUALLY -->
|
||||
|
||||
- `skip_region_validation` (bool) - The region validation can be skipped if this value is true, the default
|
||||
value is false.
|
||||
|
||||
- `skip_image_validation` (bool) - The image validation can be skipped if this value is true, the default
|
||||
value is false.
|
||||
|
||||
- `profile` (string) - Alicloud profile must be set unless `access_key` is set; it can also be
|
||||
sourced from the `ALICLOUD_PROFILE` environment variable.
|
||||
|
||||
- `shared_credentials_file` (string) - Alicloud shared credentials file path. If this file exists, access and
|
||||
secret keys will be read from this file.
|
||||
|
||||
- `security_token` (string) - STS access token, can be set through template or by exporting as
|
||||
environment variable such as `export SECURITY_TOKEN=value`.
|
||||
|
||||
<!-- End of code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; -->
|
@ -1,12 +0,0 @@
|
||||
<!-- Code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; DO NOT EDIT MANUALLY -->
|
||||
|
||||
- `access_key` (string) - Alicloud access key must be provided unless `profile` is set, but it can
|
||||
also be sourced from the `ALICLOUD_ACCESS_KEY` environment variable.
|
||||
|
||||
- `secret_key` (string) - Alicloud secret key must be provided unless `profile` is set, but it can
|
||||
also be sourced from the `ALICLOUD_SECRET_KEY` environment variable.
|
||||
|
||||
- `region` (string) - Alicloud region must be provided unless `profile` is set, but it can
|
||||
also be sourced from the `ALICLOUD_REGION` environment variable.
|
||||
|
||||
<!-- End of code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; -->
|
@ -1,5 +0,0 @@
|
||||
<!-- Code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; DO NOT EDIT MANUALLY -->
|
||||
|
||||
Config of alicloud
|
||||
|
||||
<!-- End of code generated from the comments of the AlicloudAccessConfig struct in builder/alicloud/ecs/access_config.go; -->
|
@ -1,42 +0,0 @@
<!-- Code generated from the comments of the AlicloudDiskDevice struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

- `disk_name` (string) - The value of disk name is blank by default. [2,
  128] English or Chinese characters, must begin with an
  uppercase/lowercase letter or Chinese character. Can contain numbers,
  ., _ and -. The disk name will appear on the console. It cannot
  begin with `http://` or `https://`.

- `disk_category` (string) - Category of the system disk. Optional values are:
  - cloud - general cloud disk
  - cloud_efficiency - efficiency cloud disk
  - cloud_ssd - cloud SSD

- `disk_size` (int) - Size of the system disk, measured in GiB. Value
  range: [20, 500]. The specified value must be equal to or greater
  than max{20, ImageSize}. Default value: max{40, ImageSize}.

- `disk_snapshot_id` (string) - Snapshots are used to create the data
  disk. After this parameter is specified, Size is ignored. The actual
  size of the created disk is the size of the specified snapshot.
  This field is only used in the ECSImagesDiskMappings option, not
  the ECSSystemDiskMapping option.

- `disk_description` (string) - The value of disk description is blank by
  default. [2, 256] characters. The disk description will appear on the
  console. It cannot begin with `http://` or `https://`.

- `disk_delete_with_instance` (bool) - Whether or not the disk is
  released along with the instance.

- `disk_device` (string) - Device information of the related instance,
  such as /dev/xvdb. It is null unless the Status is In_use.

- `disk_encrypted` (boolean) - Whether or not to encrypt the data disk.
  If this option is set to true, the data disk will be encrypted and the
  corresponding snapshot in the target image will also be encrypted. By
  default, if this is an extra data disk, Packer will not encrypt the
  data disk. Otherwise, Packer will keep the encryption setting to what
  it was in the source image. Please refer to Introduction of ECS disk
  encryption for more details.

<!-- End of code generated from the comments of the AlicloudDiskDevice struct in builder/alicloud/ecs/image_config.go; -->

@ -1,6 +0,0 @@
<!-- Code generated from the comments of the AlicloudDiskDevice struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

The "AlicloudDiskDevice" object is used for the `ECSSystemDiskMapping` and
`ECSImagesDiskMappings` options, and contains the following fields:

<!-- End of code generated from the comments of the AlicloudDiskDevice struct in builder/alicloud/ecs/image_config.go; -->
@ -1,37 +0,0 @@
<!-- Code generated from the comments of the AlicloudDiskDevices struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

- `system_disk_mapping` (AlicloudDiskDevice) - Image disk mapping for the system disk.
  See the [disk device configuration](#disk-devices-configuration) section
  for more information on options.
  Usage example:

  ```json
  "builders": [{
    "type":"alicloud-ecs",
    "system_disk_mapping": {
      "disk_size": 50,
      "disk_name": "mydisk"
    },
    ...
  }
  ```

- `image_disk_mappings` ([]AlicloudDiskDevice) - Add one or more data disks to the image.
  See the [disk device configuration](#disk-devices-configuration) section
  for more information on options.
  Usage example:

  ```json
  "builders": [{
    "type":"alicloud-ecs",
    "image_disk_mappings": [
      {
        "disk_snapshot_id": "someid",
        "disk_device": "dev/xvdb"
      }
    ],
    ...
  }
  ```

<!-- End of code generated from the comments of the AlicloudDiskDevices struct in builder/alicloud/ecs/image_config.go; -->

@ -1,6 +0,0 @@
<!-- Code generated from the comments of the AlicloudDiskDevices struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

The "AlicloudDiskDevices" object is used to define disk mappings for your
instance.

<!-- End of code generated from the comments of the AlicloudDiskDevices struct in builder/alicloud/ecs/image_config.go; -->
@ -1,61 +0,0 @@
<!-- Code generated from the comments of the AlicloudImageConfig struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

- `image_version` (string) - The version number of the image, with a length limit of 1 to 40 English
  characters.

- `image_description` (string) - The description of the image, with a length limit of 0 to 256
  characters. Leaving it blank means null, which is the default value. It
  cannot begin with `http://` or `https://`.

- `image_share_account` ([]string) - The IDs of to-be-added Aliyun accounts to which the image is shared. The
  number of accounts is 1 to 10. If the number of accounts is greater than 10,
  this parameter is ignored.

- `image_unshare_account` ([]string) - Alicloud Image UN Share Accounts

- `image_copy_regions` ([]string) - Copy to the destination regionIds.

- `image_copy_names` ([]string) - The name of the destination image, [2, 128] English or Chinese
  characters. It must begin with an uppercase/lowercase letter or a
  Chinese character, and may contain numbers, _ or -. It cannot begin with
  `http://` or `https://`.

- `image_encrypted` (boolean) - Whether or not to encrypt the target images, including those
  copied if image_copy_regions is specified. If this option is set to
  true, a temporary image will be created from the provisioned instance in
  the main region and an encrypted copy will be generated in the same
  region. By default, Packer will keep the encryption setting to what it
  was in the source image.

- `image_force_delete` (bool) - If this value is true, when the target image names (including those
  copied) are duplicated with existing images, it will delete the existing
  images and then create the target images; otherwise, the creation will
  fail. The default value is false. Check the `image_name` and
  `image_copy_names` options for the names of target images. If the
  [-force](/docs/commands/build#force) option is provided in the `build`
  command, this option can be omitted and taken as true.

- `image_force_delete_snapshots` (bool) - If this value is true, when the duplicated existing images are deleted,
  the source snapshots of those images will be deleted as well. If the
  [-force](/docs/commands/build#force) option is provided in the `build`
  command, this option can be omitted and taken as true.

- `image_force_delete_instances` (bool) - Alicloud Image Force Delete Instances

- `image_ignore_data_disks` (bool) - If this value is true, the image created will not include any snapshot
  of data disks. This option is useful when the default data disks bundled
  with the instance type are not needed. The default value is false.

- `skip_region_validation` (bool) - The region validation can be skipped if this value is true; the default
  value is false.

- `tags` (map[string]string) - Key/value pair tags applied to the destination image and relevant
  snapshots.

- `tag` ([]{key string, value string}) - Same as [`tags`](#tags) but defined as a singular repeatable block
  containing a `key` and a `value` field. In HCL2 mode the
  [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
  will allow you to create those programmatically.

<!-- End of code generated from the comments of the AlicloudImageConfig struct in builder/alicloud/ecs/image_config.go; -->

@ -1,8 +0,0 @@
<!-- Code generated from the comments of the AlicloudImageConfig struct in builder/alicloud/ecs/image_config.go; DO NOT EDIT MANUALLY -->

- `image_name` (string) - The name of the user-defined image, [2, 128] English or Chinese
  characters. It must begin with an uppercase/lowercase letter or a
  Chinese character, and may contain numbers, `_` or `-`. It cannot begin
  with `http://` or `https://`.

<!-- End of code generated from the comments of the AlicloudImageConfig struct in builder/alicloud/ecs/image_config.go; -->
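As a hedged illustration of the image options above, a fragment of an `alicloud-ecs` builder might look like the following; all names and values are placeholders, and the access and run options are omitted.

```json
{
  "builders": [
    {
      "type": "alicloud-ecs",
      "image_name": "my_custom_image",
      "image_description": "Image built by Packer",
      "image_copy_regions": ["cn-hangzhou"],
      "tags": {
        "Environment": "test"
      }
    }
  ]
}
```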
@ -1,99 +0,0 @@
<!-- Code generated from the comments of the RunConfig struct in builder/alicloud/ecs/run_config.go; DO NOT EDIT MANUALLY -->

- `associate_public_ip_address` (bool) - Associate Public Ip Address

- `zone_id` (string) - ID of the zone to which the disk belongs.

- `io_optimized` (boolean) - Whether an ECS instance is I/O optimized or not. If this option is not
  provided, the value will be determined by the product API according to what
  `instance_type` is used.

- `description` (string) - Description

- `force_stop_instance` (bool) - Whether to force shutdown upon device
  restart. The default value is `false`.

  If it is set to `false`, the system is shut down normally; if it is set to
  `true`, the system is forced to shut down.

- `disable_stop_instance` (bool) - If this option is set to true, Packer
  will not stop the instance for you, and you need to make sure the instance
  will be stopped in the final provisioner command. Otherwise, Packer will
  time out while waiting for the instance to be stopped. This option is provided
  for specific scenarios in which you want to stop the instance yourself,
  e.g. sysprepping a Windows instance, which may shut down the instance within
  its own command. The default value is false.

- `ram_role_name` (string) - Ram Role to apply when launching the instance.

- `security_group_id` (string) - ID of the security group to which a newly
  created instance belongs. Mutual access is allowed between instances in one
  security group. If not specified, the newly created instance will be added
  to the default security group. If the default group doesn't exist, or the
  number of instances in it has reached the maximum limit, a new security
  group will be created automatically.

- `security_group_name` (string) - The security group name. The default value
  is blank. [2, 128] English or Chinese characters, must begin with an
  uppercase/lowercase letter or Chinese character. Can contain numbers, .,
  _ or -. It cannot begin with `http://` or `https://`.

- `user_data` (string) - User data to apply when launching the instance. Note
  that you need to be careful about escaping characters due to the templates
  being JSON. It is often more convenient to use user_data_file, instead.
  Packer will not automatically wait for a user script to finish before
  shutting down the instance; this must be handled in a provisioner.

- `user_data_file` (string) - Path to a file that will be used for the user
  data when launching the instance.

- `vpc_id` (string) - VPC ID allocated by the system.

- `vpc_name` (string) - The VPC name. The default value is blank. [2, 128]
  English or Chinese characters, must begin with an uppercase/lowercase
  letter or Chinese character. Can contain numbers, _ and -. The disk
  description will appear on the console. Cannot begin with `http://` or
  `https://`.

- `vpc_cidr_block` (string) - Value options: 192.168.0.0/16 and
  172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.

- `vswitch_id` (string) - The ID of the VSwitch to be used.

- `vswitch_name` (string) - The ID of the VSwitch to be used.

- `instance_name` (string) - Display name of the instance, which is a string of 2 to 128 Chinese or
  English characters. It must begin with an uppercase/lowercase letter or
  a Chinese character and can contain numerals, `.`, `_`, or `-`. The
  instance name is displayed on the Alibaba Cloud console. If this
  parameter is not specified, the default value is the InstanceId of the
  instance. It cannot begin with `http://` or `https://`.

- `internet_charge_type` (string) - Internet charge type, which can be
  `PayByTraffic` or `PayByBandwidth`. Optional values:
  - `PayByBandwidth`
  - `PayByTraffic`

  If this parameter is not specified, the default value is `PayByBandwidth`.
  For regions outside of China, currently only `PayByTraffic` is supported
  and you must set it manually.

- `internet_max_bandwidth_out` (int) - Maximum outgoing bandwidth to the
  public network, measured in Mbps (Mega bits per second).

  Value range:
  - `PayByBandwidth`: \[0, 100\]. If this parameter is not specified, the API
    automatically sets it to 0 Mbps.
  - `PayByTraffic`: \[1, 100\]. If this parameter is not specified, an
    error is returned.

- `wait_snapshot_ready_timeout` (int) - Timeout of creating snapshot(s).
  The default timeout is 3600 seconds if this option is not set or is set
  to 0. For those disks containing lots of data, it may require a higher
  timeout value.

- `ssh_private_ip` (bool) - If this value is true, packer will connect to
  the ECS created through private ip instead of allocating a public ip or an
  EIP. The default value is false.

<!-- End of code generated from the comments of the RunConfig struct in builder/alicloud/ecs/run_config.go; -->

@ -1,13 +0,0 @@
<!-- Code generated from the comments of the RunConfig struct in builder/alicloud/ecs/run_config.go; DO NOT EDIT MANUALLY -->

- `instance_type` (string) - Type of the instance. For values, see [Instance Type
  Table](https://www.alibabacloud.com/help/doc-detail/25378.htm?spm=a3c0i.o25499en.a3.9.14a36ac8iYqKRA).
  You can also obtain the latest instance type table by invoking the
  [Querying Instance Type
  Table](https://intl.aliyun.com/help/doc-detail/25620.htm?spm=a3c0i.o25499en.a3.6.Dr1bik)
  interface.

- `source_image` (string) - This is the base image ID from which you want to
  create your customized images.

<!-- End of code generated from the comments of the RunConfig struct in builder/alicloud/ecs/run_config.go; -->
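Putting the access, image, and run options together, a minimal `alicloud-ecs` builder could be sketched as below; every value is a placeholder chosen for illustration rather than taken from the original docs.

```json
{
  "builders": [
    {
      "type": "alicloud-ecs",
      "access_key": "YOUR_ACCESS_KEY",
      "secret_key": "YOUR_SECRET_KEY",
      "region": "cn-beijing",
      "image_name": "my_custom_image",
      "source_image": "centos_7_04_64_20G_alibase_201701015.vhd",
      "instance_type": "ecs.n1.tiny",
      "io_optimized": true,
      "ssh_username": "root"
    }
  ]
}
```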
@ -1,111 +0,0 @@
<!-- Code generated from the comments of the Config struct in builder/cloudstack/config.go; DO NOT EDIT MANUALLY -->

- `async_timeout` (duration string | ex: "1h5m2s") - The time duration to wait for async calls to
  finish. Defaults to 30m.

- `http_get_only` (bool) - Some cloud providers only allow HTTP GET calls
  to their CloudStack API. If using such a provider, you need to set this to
  true in order for the provider to only make GET calls and no POST calls.

- `ssl_no_verify` (bool) - Set to true to skip SSL verification.
  Defaults to false.

- `cidr_list` ([]string) - List of CIDRs that will have access to the new
  instance. This is needed in order for any provisioners to be able to
  connect to the instance. Defaults to [ "0.0.0.0/0" ]. Only required when
  use_local_ip_address is false.

- `create_security_group` (bool) - If true a temporary security group
  will be created which allows traffic towards the instance from the
  cidr_list. This option will be ignored if security_groups is also
  defined. Requires expunge set to true. Defaults to false.

- `disk_offering` (string) - The name or ID of the disk offering used for the
  instance. This option is only available (and also required) when using
  source_iso.

- `disk_size` (int64) - The size (in GB) of the root disk of the new
  instance. This option is only available when using source_template.

- `eject_iso` (bool) - If `true` make a call to the CloudStack API, after loading the image to
  cache, requesting to check and detach the ISO file (if any) currently
  attached to a virtual machine. Defaults to `false`. This option is only
  available when using `source_iso`.

- `eject_iso_delay` (duration string | ex: "1h5m2s") - Configure the duration to wait, making sure the virtual machine is able
  to finish installing the OS before it ejects safely. Requires `eject_iso`
  set to `true` and this option is only available when using `source_iso`.

- `expunge` (bool) - Set to true to expunge the instance when it is
  destroyed. Defaults to false.

- `hypervisor` (string) - The target hypervisor (e.g. XenServer, KVM) for
  the new template. This option is required when using source_iso.

- `instance_name` (string) - The name of the instance. Defaults to
  "packer-UUID" where UUID is dynamically generated.

- `instance_display_name` (string) - The display name of the instance. Defaults to "Created by Packer".

- `project` (string) - The name or ID of the project to deploy the instance
  to.

- `public_ip_address` (string) - The public IP address or its ID used for
  connecting any provisioners to. If not provided, a temporary public IP
  address will be associated and released during the Packer run.

- `public_port` (int) - The fixed port you want to configure in the port
  forwarding rule. Set this attribute if you do not want to use a random
  public port.

- `security_groups` ([]string) - A list of security group IDs or
  names to associate the instance with.

- `prevent_firewall_changes` (bool) - Set to true to prevent network
  ACLs or firewall rules creation. Defaults to false.

- `temporary_keypair_name` (string) - The name of the temporary SSH key pair
  to generate. By default, Packer generates a name that looks like
  `packer_<UUID>`, where `<UUID>` is a 36 character unique identifier.

- `use_local_ip_address` (bool) - Set to true to indicate that the
  provisioners should connect to the local IP address of the instance.

- `user_data` (string) - User data to launch with the instance. This is a
  template engine; see "User Data" below for
  more details. Packer will not automatically wait for a user script to
  finish before shutting down the instance; this must be handled in a
  provisioner.

- `user_data_file` (string) - Path to a file that will be used for the user
  data when launching the instance. This file will be parsed as a
  template; see User Data below for more
  details.

- `template_name` (string) - The name of the new template. Defaults to
  `packer-{{timestamp}}` where timestamp will be the current time.

- `template_display_text` (string) - The display text of the new template.
  Defaults to the template_name.

- `template_featured` (bool) - Set to true to indicate that the template
  is featured. Defaults to false.

- `template_public` (bool) - Set to true to indicate that the template
  is available for all accounts. Defaults to false.

- `template_password_enabled` (bool) - Set to true to indicate the
  template should be password enabled. Defaults to false.

- `template_requires_hvm` (bool) - Set to true to indicate the template
  requires hardware-assisted virtualization. Defaults to false.

- `template_scalable` (bool) - Set to true to indicate that the template
  contains tools to support dynamic scaling of VM cpu/memory. Defaults to
  false.

- `template_tag` (string) -

- `tags` (map[string]string) - Tags

<!-- End of code generated from the comments of the Config struct in builder/cloudstack/config.go; -->

@ -1,34 +0,0 @@
<!-- Code generated from the comments of the Config struct in builder/cloudstack/config.go; DO NOT EDIT MANUALLY -->

- `api_url` (string) - The CloudStack API endpoint we will connect to. It can
  also be specified via environment variable CLOUDSTACK_API_URL, if set.

- `api_key` (string) - The API key used to sign all API requests. It can also
  be specified via environment variable CLOUDSTACK_API_KEY, if set.

- `secret_key` (string) - The secret key used to sign all API requests. It
  can also be specified via environment variable CLOUDSTACK_SECRET_KEY, if
  set.

- `network` (string) - The name or ID of the network to connect the instance
  to.

- `service_offering` (string) - The name or ID of the service offering used
  for the instance.

- `source_iso` (string) - The name or ID of an ISO that will be mounted
  before booting the instance. This option is mutually exclusive with
  source_template. When using source_iso, both disk_offering and
  hypervisor are required.

- `source_template` (string) - The name or ID of the template used as base
  template for the instance. This option is mutually exclusive with
  source_iso.

- `zone` (string) - The name or ID of the zone where the instance will be
  created.

- `template_os` (string) - The name or ID of the template OS for the new
  template that will be created.

<!-- End of code generated from the comments of the Config struct in builder/cloudstack/config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the Config struct in builder/cloudstack/config.go; DO NOT EDIT MANUALLY -->

Config holds all the details needed to configure the builder.

<!-- End of code generated from the comments of the Config struct in builder/cloudstack/config.go; -->
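A rough sketch of a `cloudstack` builder using the required options above; the endpoint, offering, zone, and OS values are placeholders and would need to match your own CloudStack installation.

```json
{
  "builders": [
    {
      "type": "cloudstack",
      "api_url": "https://cloudstack.example.com/client/api",
      "api_key": "YOUR_API_KEY",
      "secret_key": "YOUR_SECRET_KEY",
      "network": "management",
      "service_offering": "small",
      "source_template": "base-template-id",
      "zone": "zone-1",
      "template_os": "Other PV (64-bit)",
      "template_name": "packer-{{timestamp}}",
      "ssh_username": "root"
    }
  ]
}
```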
@ -1,55 +0,0 @@
<!-- Code generated from the comments of the Config struct in builder/digitalocean/config.go; DO NOT EDIT MANUALLY -->

- `api_url` (string) - Non standard api endpoint URL. Set this if you are
  using a DigitalOcean API compatible service. It can also be specified via
  environment variable DIGITALOCEAN_API_URL.

- `private_networking` (bool) - Set to true to enable private networking
  for the droplet being created. This defaults to false, or not enabled.

- `monitoring` (bool) - Set to true to enable monitoring for the droplet
  being created. This defaults to false, or not enabled.

- `ipv6` (bool) - Set to true to enable ipv6 for the droplet being
  created. This defaults to false, or not enabled.

- `snapshot_name` (string) - The name of the resulting snapshot that will
  appear in your account. Defaults to `packer-{{timestamp}}` (see
  configuration templates for more info).

- `snapshot_regions` ([]string) - The regions of the resulting
  snapshot that will appear in your account.

- `state_timeout` (duration string | ex: "1h5m2s") - The time to wait, as a duration string, for a
  droplet to enter a desired state (such as "active") before timing out. The
  default state timeout is "6m".

- `snapshot_timeout` (duration string | ex: "1h5m2s") - How long to wait for an image to be published to the shared image
  gallery before timing out. If your Packer build is failing on the
  Publishing to Shared Image Gallery step with the error `Original Error:
  context deadline exceeded`, but the image is present when you check your
  Azure dashboard, then you probably need to increase this timeout from
  its default of "60m" (valid time units include `s` for seconds, `m` for
  minutes, and `h` for hours.)

- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean
  sets the hostname of the machine to this value.

- `user_data` (string) - User data to launch with the Droplet. Packer will
  not automatically wait for a user script to finish before shutting down the
  instance; this must be handled in a provisioner.

- `user_data_file` (string) - Path to a file that will be used for the user
  data when launching the Droplet.

- `tags` ([]string) - Tags to apply to the droplet when it is created

- `vpc_uuid` (string) - UUID of the VPC which the droplet will be created in. Before using this,
  private_networking should be enabled.

- `connect_with_private_ip` (bool) - Whether the communicators should use the private IP or not (the public IP
  in that case). If the droplet is, or is going to be, accessible only from
  the local network because it is behind a firewall, then the communicators
  should use the private IP instead of the public IP. Before using this,
  private_networking should be enabled.

<!-- End of code generated from the comments of the Config struct in builder/digitalocean/config.go; -->

@ -1,22 +0,0 @@
<!-- Code generated from the comments of the Config struct in builder/digitalocean/config.go; DO NOT EDIT MANUALLY -->

- `api_token` (string) - The client TOKEN to use to access your account. It
  can also be specified via environment variable DIGITALOCEAN_API_TOKEN, if
  set.

- `region` (string) - The name (or slug) of the region to launch the droplet
  in. Consequently, this is the region where the snapshot will be available.
  See
  https://developers.digitalocean.com/documentation/v2/#list-all-regions
  for the accepted region names/slugs.

- `size` (string) - The name (or slug) of the droplet size to use. See
  https://developers.digitalocean.com/documentation/v2/#list-all-sizes
  for the accepted size names/slugs.

- `image` (string) - The name (or slug) of the base image to use. This is the
  image that will be used to launch a new droplet and provision it. See
  https://developers.digitalocean.com/documentation/v2/#list-all-images
  for details on how to get a list of the accepted image names/slugs.

<!-- End of code generated from the comments of the Config struct in builder/digitalocean/config.go; -->
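For illustration, a minimal `digitalocean` builder using the required options above might look like this; the token, slugs, and snapshot name are placeholders.

```json
{
  "builders": [
    {
      "type": "digitalocean",
      "api_token": "YOUR_API_TOKEN",
      "region": "nyc3",
      "size": "s-1vcpu-1gb",
      "image": "ubuntu-20-04-x64",
      "ssh_username": "root",
      "snapshot_name": "packer-{{timestamp}}"
    }
  ]
}
```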
@ -1,58 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; DO NOT EDIT MANUALLY -->

- `user_id` (string) - Sets username

- `tenant_id` (string) - The tenant ID or name to boot the instance into. Some OpenStack
  installations require this. If not specified, Packer will use the
  environment variable OS_TENANT_NAME or OS_TENANT_ID, if set. Tenant is
  also called Project in later versions of OpenStack.

- `tenant_name` (string) - Tenant Name

- `domain_id` (string) - Domain ID

- `domain_name` (string) - The Domain name or ID you are authenticating with. OpenStack
  installations require this if identity v3 is used. Packer will use the
  environment variable OS_DOMAIN_NAME or OS_DOMAIN_ID, if set.

- `insecure` (bool) - Whether or not the connection to OpenStack can be done over an insecure
  connection. By default this is false.

- `region` (string) - The name of the region, such as "DFW", in which to launch the server to
  create the image. If not specified, Packer will use the environment
  variable OS_REGION_NAME, if set.

- `endpoint_type` (string) - The endpoint type to use. Can be any of "internal", "internalURL",
  "admin", "adminURL", "public", and "publicURL". By default this is
  "public".

- `cacert` (string) - Custom CA certificate file path. If omitted the OS_CACERT environment
  variable can be used.

- `cert` (string) - Client certificate file path for SSL client authentication. If omitted
  the OS_CERT environment variable can be used.

- `key` (string) - Client private key file path for SSL client authentication. If omitted
  the OS_KEY environment variable can be used.

- `token` (string) - the token (id) to use with token based authorization. Packer will use
  the environment variable OS_TOKEN, if set.

- `application_credential_name` (string) - The application credential name to use with application credential based
  authorization. Packer will use the environment variable
  OS_APPLICATION_CREDENTIAL_NAME, if set.

- `application_credential_id` (string) - The application credential id to use with application credential based
  authorization. Packer will use the environment variable
  OS_APPLICATION_CREDENTIAL_ID, if set.

- `application_credential_secret` (string) - The application credential secret to use with application credential
  based authorization. Packer will use the environment variable
  OS_APPLICATION_CREDENTIAL_SECRET, if set.

- `cloud` (string) - An entry in a `clouds.yaml` file. See the OpenStack os-client-config
  [documentation](https://docs.openstack.org/os-client-config/latest/user/configuration.html)
  for more information about `clouds.yaml` files. If omitted, the
  `OS_CLOUD` environment variable is used.

<!-- End of code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; -->

@ -1,17 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; DO NOT EDIT MANUALLY -->

- `username` (string) - The username or id used to connect to the OpenStack service. If not
  specified, Packer will use the environment variable OS_USERNAME or
  OS_USERID, if set. This is not required if using access token or
  application credential instead of password, or if using cloud.yaml.

- `password` (string) - The password used to connect to the OpenStack service. If not specified,
  Packer will use the environment variables OS_PASSWORD, if set. This is
  not required if using access token or application credential instead of
  password, or if using cloud.yaml.

- `identity_endpoint` (string) - The URL to the OpenStack Identity service. If not specified, Packer will
  use the environment variables OS_AUTH_URL, if set. This is not required
  if using cloud.yaml.

<!-- End of code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; DO NOT EDIT MANUALLY -->

AccessConfig is for common configuration related to openstack access

<!-- End of code generated from the comments of the AccessConfig struct in builder/openstack/access_config.go; -->
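A hedged sketch of password-based authentication with the `openstack` builder using the options above; the endpoint, domain, project, and region values are placeholders, and the source image and flavor options are omitted.

```json
{
  "builders": [
    {
      "type": "openstack",
      "identity_endpoint": "https://keystone.example.com:5000/v3",
      "username": "packer",
      "password": "YOUR_PASSWORD",
      "domain_name": "Default",
      "tenant_name": "my-project",
      "region": "RegionOne"
    }
  ]
}
```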
@ -1,24 +0,0 @@
<!-- Code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; DO NOT EDIT MANUALLY -->

- `metadata` (map[string]string) - Glance metadata that will be applied to the image.

- `image_visibility` (imageservice.ImageVisibility) - One of "public", "private", "shared", or "community".

- `image_members` ([]string) - List of members to add to the image after creation. An image member is
  usually a project (also called the "tenant") with whom the image is
  shared.

- `image_auto_accept_members` (bool) - When true, perform the image accept so the members can see the image in their
  project. This requires a user with privileges both in the build project and
  in the members provided. Defaults to false.

- `image_disk_format` (string) - Disk format of the resulting image. This option works if
  use_blockstorage_volume is true.

- `image_tags` ([]string) - List of tags to add to the image after creation.

- `image_min_disk` (int) - Minimum disk size needed to boot image, in gigabytes.

- `skip_create_image` (bool) - Skip creating the image. Useful for setting to `true` during a build test stage. Defaults to `false`.

<!-- End of code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; DO NOT EDIT MANUALLY -->

- `image_name` (string) - The name of the resulting image.

<!-- End of code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; DO NOT EDIT MANUALLY -->

ImageConfig is for common configuration related to creating Images.

<!-- End of code generated from the comments of the ImageConfig struct in builder/openstack/image_config.go; -->

@ -1,10 +0,0 @@
<!-- Code generated from the comments of the ImageFilter struct in builder/openstack/run_config.go; DO NOT EDIT MANUALLY -->

- `filters` (ImageFilterOptions) - filters used to select a source_image. NOTE: This will fail unless
  exactly one image is returned, or most_recent is set to true. Of the
  filters described in ImageService, the following are valid:

- `most_recent` (bool) - Selects the newest created image when true. This is most useful for
  selecting a daily distro build.

<!-- End of code generated from the comments of the ImageFilter struct in builder/openstack/run_config.go; -->

@ -1,13 +0,0 @@
<!-- Code generated from the comments of the ImageFilterOptions struct in builder/openstack/run_config.go; DO NOT EDIT MANUALLY -->

- `name` (string) - Name

- `owner` (string) - Owner

- `tags` ([]string) - Tags

- `visibility` (string) - Visibility

- `properties` (map[string]string) - Properties

<!-- End of code generated from the comments of the ImageFilterOptions struct in builder/openstack/run_config.go; -->
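As an illustration of the image options above, an `openstack` builder fragment might include the following; the values are placeholders and the access and run options are omitted.

```json
{
  "builders": [
    {
      "type": "openstack",
      "image_name": "packer-ubuntu-16.04",
      "image_visibility": "private",
      "image_tags": ["packer", "ci"],
      "metadata": {
        "os_distro": "ubuntu"
      }
    }
  ]
}
```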
@ -1,99 +0,0 @@
<!-- Code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; DO NOT EDIT MANUALLY -->

- `ssh_interface` (string) - The type of interface to connect via SSH. Values useful for Rackspace
  are "public" or "private", and the default behavior is to connect via
  whichever is returned first from the OpenStack API.

- `ssh_ip_version` (string) - The IP version to use for SSH connections, valid values are `4` and `6`.
  Useful on dual stacked instances where the default behavior is to
  connect via whichever IP address is returned first from the OpenStack
  API.

- `external_source_image_format` (string) - The format of the external source image to use, e.g. qcow2, raw.

- `external_source_image_properties` (map[string]string) - Properties to set for the external source image

- `availability_zone` (string) - The availability zone to launch the server in. If this isn't specified,
  the default enforced by your OpenStack cluster will be used. This may be
  required for some OpenStack clusters.

- `rackconnect_wait` (bool) - For Rackspace, whether or not to wait for Rackconnect to assign the
  machine an IP address before connecting via SSH. Defaults to false.

- `floating_ip_network` (string) - The ID or name of an external network that can be used for creation of a
  new floating IP.

- `instance_floating_ip_net` (string) - The ID of the network to which the instance is attached and which should
  be used to associate with the floating IP. This provides control over
  the floating ip association on multi-homed instances. The association
  otherwise depends on a first-returned-interface policy which could fail
  if the network to which it is connected is unreachable from the floating
  IP network.

- `floating_ip` (string) - A specific floating IP to assign to this instance.

- `reuse_ips` (bool) - Whether or not to attempt to reuse existing unassigned floating ips in
  the project before allocating a new one. Note that it is not possible to
  safely do this concurrently, so if you are running multiple openstack
  builds concurrently, or if other processes are assigning and using
  floating IPs in the same openstack project while packer is running, you
  should not set this to true. Defaults to false.

- `security_groups` ([]string) - A list of security groups by name to add to this instance.

- `networks` ([]string) - A list of networks by UUID to attach to this instance.

- `ports` ([]string) - A list of ports by UUID to attach to this instance.

- `network_discovery_cidrs` ([]string) - A list of network CIDRs to discover the network to attach to this instance.
  The first network whose subnet is contained within any of the given CIDRs
  is used. Ignored if either of the above two options are provided.

- `user_data` (string) - User data to apply when launching the instance. Note that you need to be
  careful about escaping characters due to the templates being JSON. It is
  often more convenient to use user_data_file, instead. Packer will not
  automatically wait for a user script to finish before shutting down the
  instance; this must be handled in a provisioner.

- `user_data_file` (string) - Path to a file that will be used for the user data when launching the
  instance.

- `instance_name` (string) - Name that is applied to the server instance created by Packer. If this
  isn't specified, the default is the same as image_name.

- `instance_metadata` (map[string]string) - Metadata that is applied to the server instance created by Packer. Also
  called server properties in some documentation. The strings have a max
  size of 255 bytes each.

- `force_delete` (bool) - Whether to force the OpenStack instance to be forcefully deleted. This
  is useful for environments that have reclaim / soft deletion enabled. By
  default this is false.

- `config_drive` (bool) - Whether or not nova should use ConfigDrive for cloud-init metadata.

- `floating_ip_pool` (string) - Deprecated; use floating_ip_network instead.

- `use_blockstorage_volume` (bool) - Use Block Storage service volume for the instance root volume instead of
  Compute service local volume (default).

- `volume_name` (string) - Name of the Block Storage service volume. If this isn't specified, a
  random string will be used.

- `volume_type` (string) - Type of the Block Storage service volume. If this isn't specified, the
  default enforced by your OpenStack cluster will be used.

- `volume_size` (int) - Size of the Block Storage service volume in GB. If this isn't specified,
  it is set to the source image min disk value (if set) or calculated from the
  source image bytes size. Note that in some cases this needs to be
  specified, if use_blockstorage_volume is true.

- `volume_availability_zone` (string) - Availability zone of the Block Storage service volume. If omitted, the
  Compute instance availability zone will be used. If both the Compute
  instance and Block Storage volume availability zones aren't specified,
  the default enforced by your OpenStack cluster will be used.

- `openstack_provider` (string) - Not really used, but here for BC

- `use_floating_ip` (bool) - *Deprecated* use `floating_ip` or `floating_ip_pool` instead.

<!-- End of code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; -->

@ -1,64 +0,0 @@
<!-- Code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; DO NOT EDIT MANUALLY -->

- `source_image` (string) - The ID or full URL to the base image to use. This is the image that will
  be used to launch a new server and provision it. Unless you specify
  completely custom SSH settings, the source image must have cloud-init
  installed so that the keypair gets assigned properly.

- `source_image_name` (string) - The name of the base image to use. This is an alternative way of
  providing source_image and only either of them can be specified.

- `external_source_image_url` (string) - The URL of an external base image to use. This is an alternative way of
  providing source_image and only either of them can be specified.

- `source_image_filter` (ImageFilter) - Filters used to populate filter options. Example:

  ```json
  {
    "source_image_filter": {
      "filters": {
        "name": "ubuntu-16.04",
        "visibility": "protected",
        "owner": "d1a588cf4b0743344508dc145649372d1",
        "tags": ["prod", "ready"],
        "properties": {
          "os_distro": "ubuntu"
        }
      },
      "most_recent": true
    }
  }
  ```

  This selects the most recent production Ubuntu 16.04 shared to you by
  the given owner. NOTE: This will fail unless *exactly* one image is
  returned, or `most_recent` is set to true. In the example of multiple
  returned images, `most_recent` will cause this to succeed by selecting
  the newest image of the returned images.

  - `filters` (map of strings) - filters used to select a
    `source_image`.
    NOTE: This will fail unless *exactly* one image is returned, or
    `most_recent` is set to true. Of the filters described in
    [ImageService](https://developer.openstack.org/api-ref/image/v2/), the
    following are valid:

    - name (string)
    - owner (string)
    - tags (array of strings)
    - visibility (string)
    - properties (map of strings to strings) (fields that can be set
      with `openstack image set --property key=value`)

  - `most_recent` (boolean) - Selects the newest created image when
    true.
    This is most useful for selecting a daily distro build.

  You may use this in place of `source_image`. If `source_image_filter`
  is provided alongside `source_image`, the `source_image` will override
  the filter. The filter will not be used in this case.

- `flavor` (string) - The ID, name, or full URL for the desired flavor for the server to be
  created.

<!-- End of code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; -->

@ -1,6 +0,0 @@
<!-- Code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; DO NOT EDIT MANUALLY -->

RunConfig contains configuration for running an instance from a source image
and details on how to access that launched image.

<!-- End of code generated from the comments of the RunConfig struct in builder/openstack/run_config.go; -->
@ -1,21 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; DO NOT EDIT MANUALLY -->

- `triton_url` (string) - The URL of the Triton cloud API to use. If omitted
  it will default to the us-sw-1 region of the Joyent Public cloud. If you
  are using your own private Triton installation you will have to supply the
  URL of the cloud API of your own Triton installation.

- `triton_user` (string) - The username of a user who has access to your
  Triton account.

- `triton_key_material` (string) - Path to the file in which the private key
  of triton_key_id is stored. For example /home/soandso/.ssh/id_rsa. If
  this is not specified, the SSH agent is used to sign requests with the
  triton_key_id specified.

- `insecure_skip_tls_verify` (bool) - This allows skipping TLS verification
  of the Triton endpoint. It is useful when connecting to a temporary Triton
  installation such as Cloud-On-A-Laptop which does not generally use a
  certificate signed by a trusted root CA. The default is false.

<!-- End of code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; -->

@ -1,11 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; DO NOT EDIT MANUALLY -->

- `triton_account` (string) - The username of the Triton account to use when
  using the Triton Cloud API.

- `triton_key_id` (string) - The fingerprint of the public key of the SSH key
  pair to use for authentication with the Triton Cloud API. If
  triton_key_material is not set, it is assumed that the SSH agent has the
  private key corresponding to this key ID loaded.

<!-- End of code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; DO NOT EDIT MANUALLY -->

AccessConfig is for common configuration related to Triton access

<!-- End of code generated from the comments of the AccessConfig struct in builder/triton/access_config.go; -->

@ -1,5 +0,0 @@
<!-- Code generated from the comments of the MachineImageFilter struct in builder/triton/source_machine_config.go; DO NOT EDIT MANUALLY -->

- `most_recent` (bool) - Most Recent

<!-- End of code generated from the comments of the MachineImageFilter struct in builder/triton/source_machine_config.go; -->
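For illustration, the Triton access options above might be set as follows; the account name and key fingerprint are placeholders.

```json
{
  "builders": [
    {
      "type": "triton",
      "triton_url": "https://us-sw-1.api.joyentcloud.com",
      "triton_account": "my_account",
      "triton_key_id": "6b:95:03:3d:d3:6e:52:69:01:96:1a:46:4a:8d:c1:7e"
    }
  ]
}
```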
@ -1,44 +0,0 @@
<!-- Code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; DO NOT EDIT MANUALLY -->

- `source_machine_name` (string) - Name of the VM used for building the
  image. Does not affect (and does not have to be the same as) the name of a
  VM instance running this image. Maximum 512 characters but should in
  practice be much shorter (think between 5 and 20 characters). For example
  mysql-64-server-image-builder. When omitted defaults to
  packer-builder-[image_name].

- `source_machine_networks` ([]string) - The UUIDs of Triton
  networks added to the source machine used for creating the image. For
  example, if any of the provisioners which are run need Internet access you
  will need to add the UUIDs of the appropriate networks here. If this is
  not specified, instances will be placed into the default Triton public and
  internal networks.

- `source_machine_metadata` (map[string]string) - Triton metadata
  applied to the VM used to create the image. Metadata can be used to pass
  configuration information to the VM without the need for networking. See
  Using the metadata API in the
  Joyent documentation for more information. This can for example be used to
  set the user-script metadata key to have Triton start a user supplied
  script after the VM has booted.

- `source_machine_tags` (map[string]string) - Key/value pair tags applied to the VM used to create the image.

- `source_machine_tag` ([]{key string, value string}) - Same as [`source_machine_tags`](#source_machine_tags) but defined as a
  singular block containing a `key` and a `value` field. In HCL2 mode the
  [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
  will allow you to create those programmatically.

- `source_machine_firewall_enabled` (bool) - Whether or not the firewall
  of the VM used to create an image is enabled. The Triton firewall only
  filters inbound traffic to the VM. All outbound traffic is always allowed.
  Currently this builder does not provide an interface to add specific
  firewall rules. Unless you have a global rule defined in Triton which
  allows SSH traffic, enabling the firewall will interfere with the SSH
  provisioner. The default is false.

- `source_machine_image_filter` (MachineImageFilter) - Filters used to populate the
  source_machine_image field. Example:

<!-- End of code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; -->

@ -1,20 +0,0 @@
<!-- Code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; DO NOT EDIT MANUALLY -->

- `source_machine_package` (string) - The Triton package to use while
  building the image. Does not affect (and does not have to be the same as)
  the package which will be used for a VM instance running this image. On the
  Joyent public cloud this could for example be g3-standard-0.5-smartos.

- `source_machine_image` (string) - The UUID of the image to base the new
  image on. Triton supports multiple types of images, called 'brands' in
  Triton / Joyent lingo, for containers and VMs. See the chapter Containers
  and virtual machines in
  the Joyent Triton documentation for detailed information. The following
  brands are currently supported by this builder: joyent and kvm. The
  choice of base image automatically decides the brand. On the Joyent public
  cloud a valid source_machine_image could for example be
  70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit
  SmartOS base image (a 'joyent' brand image). source_machine_image_filter
  can be used to populate this UUID.

<!-- End of code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; -->

@ -1,6 +0,0 @@
<!-- Code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; DO NOT EDIT MANUALLY -->

SourceMachineConfig represents the configuration to run a machine using
the SDC API in order for provisioning to take place.

<!-- End of code generated from the comments of the SourceMachineConfig struct in builder/triton/source_machine_config.go; -->
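A sketch of the source machine options above for the `triton` builder; the package name and image UUID are the examples mentioned in the text and stand in for real values, and the source machine name is a placeholder.

```json
{
  "builders": [
    {
      "type": "triton",
      "source_machine_package": "g3-standard-0.5-smartos",
      "source_machine_image": "70e3ae72-96b6-11e6-9056-9737fd4d0764",
      "source_machine_name": "image-builder"
    }
  ]
}
```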
@ -1,23 +0,0 @@
<!-- Code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; DO NOT EDIT MANUALLY -->

- `image_description` (string) - Description of the image. Maximum 512
  characters.

- `image_homepage` (string) - URL of the homepage where users can find
  information about the image. Maximum 128 characters.

- `image_eula_url` (string) - URL of the End User License Agreement (EULA)
  for the image. Maximum 128 characters.

- `image_acls` ([]string) - The UUIDs of the users which will have
  access to this image. When omitted only the owner (the Triton user whose
  credentials are used) will have access to the image.

- `image_tags` (map[string]string) - Name/Value tags applied to the image.

- `image_tag` ([]{name string, value string}) - Same as [`image_tags`](#image_tags) but defined as a singular repeatable
  block containing a `name` and a `value` field. In HCL2 mode the
  [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
  will allow you to create those programmatically.

<!-- End of code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; -->

@ -1,13 +0,0 @@
<!-- Code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; DO NOT EDIT MANUALLY -->

- `image_name` (string) - The name the finished image in Triton will be
  assigned. Maximum 512 characters but should in practice be much shorter
  (think between 5 and 20 characters). For example postgresql-95-server for
  an image used as a PostgreSQL 9.5 server.

- `image_version` (string) - The version string for this image. Maximum 128
  characters. Any string will do but a format of Major.Minor.Patch is
  strongly advised by Joyent. See Semantic Versioning
  for more information on the Major.Minor.Patch versioning format.

<!-- End of code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; -->

@ -1,6 +0,0 @@
<!-- Code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; DO NOT EDIT MANUALLY -->

TargetImageConfig represents the configuration for the image to be created
from the source machine.

<!-- End of code generated from the comments of the TargetImageConfig struct in builder/triton/target_image_config.go; -->
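As a small illustration of the target image options above, using the example names mentioned in the text; the version and description values are placeholders.

```json
{
  "builders": [
    {
      "type": "triton",
      "image_name": "postgresql-95-server",
      "image_version": "1.0.0",
      "image_description": "PostgreSQL 9.5 server built with Packer"
    }
  ]
}
```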
@ -1,6 +0,0 @@
|
||||
<!-- Code generated from the comments of the Builder struct in builder/vagrant/builder.go; DO NOT EDIT MANUALLY -->
|
||||
|
||||
Builder implements packersdk.Builder and builds the actual VirtualBox
|
||||
images.
|
||||
|
||||
<!-- End of code generated from the comments of the Builder struct in builder/vagrant/builder.go; -->
|
@@ -1,92 +0,0 @@

<!-- Code generated from the comments of the Config struct in builder/vagrant/builder.go; DO NOT EDIT MANUALLY -->

- `output_dir` (string) - The directory to create that will contain your output box. We always
  create this directory and run from inside of it to prevent Vagrant init
  collisions. If unset, it will be set to packer- plus your buildname.

- `checksum` (string) - The checksum for the .box file. The type of the checksum is specified
  within the checksum field as a prefix, ex: "md5:{$checksum}". The type
  of the checksum can also be omitted and Packer will try to infer it
  based on string length. Valid values are "none", "{$checksum}",
  "md5:{$checksum}", "sha1:{$checksum}", "sha256:{$checksum}",
  "sha512:{$checksum}" or "file:{$path}". Here is a list of valid checksum
  values:
  * md5:090992ba9fd140077b0661cb75f7ce13
  * 090992ba9fd140077b0661cb75f7ce13
  * sha1:ebfb681885ddf1234c18094a45bbeafd91467911
  * ebfb681885ddf1234c18094a45bbeafd91467911
  * sha256:ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
  * ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
  * file:http://releases.ubuntu.com/20.04/SHA256SUMS
  * file:file://./local/path/file.sum
  * file:./local/path/file.sum
  * none

  Although the checksum will not be verified when it is set to "none",
  this is not recommended, since these files can be very large and
  corruption does happen from time to time.

- `box_name` (string) - If your source_box is a box file that we need to add to Vagrant, this is
  the name to give it. If left blank, it will default to "packer_" plus your
  buildname.

- `insert_key` (bool) - If true, Vagrant will automatically insert a keypair to use for SSH,
  replacing Vagrant's default insecure key inside the machine if detected.
  By default, Packer sets this to false.

- `provider` (string) - The Vagrant provider.
  This parameter is required when source_path has more than one provider,
  or when using the vagrant-cloud post-processor. Defaults to unset.

- `vagrantfile_template` (string) - What Vagrantfile to use.

- `teardown_method` (string) - Whether to halt, suspend, or destroy the box when the build has
  completed. Defaults to "halt".

- `box_version` (string) - What box version to use when initializing Vagrant.

- `template` (string) - A path to a golang template for a Vagrantfile. Our default template can
  be found here. The template variables available to you are
  `{{ .BoxName }}`, `{{ .SyncedFolder }}`, and `{{ .InsertKey }}`, which
  correspond to the Packer options box_name, synced_folder, and insert_key.

- `synced_folder` (string) - Path to the folder to be synced to the guest. The path can be absolute
  or relative to the directory Packer is being run from.

- `skip_add` (bool) - Don't call "vagrant add" to add the box to your local environment; this
  is necessary if you want to launch a box that is already added to your
  Vagrant environment.

- `add_cacert` (string) - Equivalent to setting the --cacert option in vagrant add; defaults to unset.

- `add_capath` (string) - Equivalent to setting the --capath option in vagrant add; defaults to unset.

- `add_cert` (string) - Equivalent to setting the --cert option in vagrant add; defaults to unset.

- `add_clean` (bool) - Equivalent to setting the --clean flag in vagrant add; defaults to unset.

- `add_force` (bool) - Equivalent to setting the --force flag in vagrant add; defaults to unset.

- `add_insecure` (bool) - Equivalent to setting the --insecure flag in vagrant add; defaults to unset.

- `skip_package` (bool) - If true, Packer will not call vagrant package to
  package your base box into its own standalone .box file.

- `output_vagrantfile` (string) - Output Vagrantfile.

- `package_include` ([]string) - Equivalent to setting the
  [`--include`](https://www.vagrantup.com/docs/cli/package.html#include-x-y-z) option
  in `vagrant package`; defaults to unset.

<!-- End of code generated from the comments of the Config struct in builder/vagrant/builder.go; -->
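For orientation only (this is not part of the diff), a few of the options documented in the removed partial above might appear in a Packer HCL2 template roughly as in the minimal sketch below; the box name, directory, and flag values are illustrative assumptions, and common settings such as the communicator are omitted.

```hcl
# Sketch only: exercises a handful of the options documented above.
source "vagrant" "example" {
  source_path     = "hashicorp/precise64"  # box name, local .box path, or URL
  provider        = "virtualbox"           # needed if the box ships several providers
  output_dir      = "output-vagrant"       # otherwise packer-<buildname>
  teardown_method = "halt"                 # halt, suspend, or destroy
  insert_key      = false                  # keep Vagrant's default insecure key
  skip_package    = false                  # still produce a standalone .box
}

build {
  sources = ["source.vagrant.example"]
}
```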
@@ -1,18 +0,0 @@

<!-- Code generated from the comments of the Config struct in builder/vagrant/builder.go; DO NOT EDIT MANUALLY -->

- `source_path` (string) - URL of the Vagrant box to use, or the name of the Vagrant box.
  hashicorp/precise64, ./mylocalbox.box and https://example.com/my-box.box
  are all valid source boxes. If your source is a .box file, whether
  local or from a URL like the latter example above, you will also need
  to provide a box_name. This option is required, unless you set
  global_id. You may only set one or the other, not both.

- `global_id` (string) - The global id of a Vagrant box already added to Vagrant on your system.
  You can find the global id of your Vagrant boxes using the command
  vagrant global-status; your global_id will be a 7-digit number and
  letter combination that you'll find in the leftmost column of the
  global-status output. If you choose to use global_id instead of
  source_box, Packer will skip the Vagrant initialize and add steps, and
  simply launch the box directly using the global id.

<!-- End of code generated from the comments of the Config struct in builder/vagrant/builder.go; -->
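Since source_path and global_id are mutually exclusive, a hedged sketch of the two alternatives (again assuming HCL2 syntax; the global id is a placeholder) could look like this:

```hcl
# Option 1: point the builder at a box by name, local path, or URL.
source "vagrant" "from_path" {
  source_path = "hashicorp/precise64"
}

# Option 2: reuse a box already registered with Vagrant on this machine.
# Find the id in the leftmost column of `vagrant global-status`.
source "vagrant" "from_global_id" {
  global_id = "a1b2c3d"  # placeholder
}
```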
@@ -1,11 +0,0 @@

<!-- Code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; DO NOT EDIT MANUALLY -->

- `endpoint` (string) - Non-standard API endpoint. Default is `api.cloud.yandex.net:443`.

- `service_account_key_file` (string) - Path to a file containing the Service Account key in JSON format. This
  is an alternative method to authenticate to Yandex.Cloud. Alternatively, you may set the environment variable
  `YC_SERVICE_ACCOUNT_KEY_FILE`.

- `max_retries` (int) - The maximum number of times an API request is executed.

<!-- End of code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; -->
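For illustration (not part of the diff), these access options might appear in a `yandex` source block as in the fragment below; the key file path and retry count are assumptions, and required options such as the image, folder, and communicator are omitted.

```hcl
# Fragment only: access-related settings for the yandex builder.
source "yandex" "example" {
  service_account_key_file = "./sa-key.json"  # or export YC_SERVICE_ACCOUNT_KEY_FILE
  max_retries              = 3                # retry transient API failures
  # endpoint is rarely set; it defaults to api.cloud.yandex.net:443
  # ... image, folder, and communicator options omitted ...
}
```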
@@ -1,8 +0,0 @@

<!-- Code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; DO NOT EDIT MANUALLY -->

- `token` (string) - [OAuth token](https://cloud.yandex.com/docs/iam/concepts/authorization/oauth-token)
  or [IAM token](https://cloud.yandex.com/docs/iam/concepts/authorization/iam-token)
  to use to authenticate to Yandex.Cloud. Alternatively, you may set the
  value via the environment variable `YC_TOKEN`.

<!-- End of code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; -->
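A short sketch of the two ways to supply the credential (the token value is a placeholder; in practice the environment variable is usually preferable to hard-coding a token in a template):

```hcl
# Fragment only: authenticate with an explicit token...
source "yandex" "with_token" {
  token = "<oauth-or-iam-token>"  # placeholder; do not commit real tokens
  # ...
}
# ...or leave `token` unset and export YC_TOKEN in the environment instead.
```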
@@ -1,5 +0,0 @@

<!-- Code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; DO NOT EDIT MANUALLY -->

AccessConfig is for common configuration related to Yandex.Cloud API access.

<!-- End of code generated from the comments of the AccessConfig struct in builder/yandex/access_config.go; -->
@@ -1,8 +0,0 @@

<!-- Code generated from the comments of the CloudConfig struct in builder/yandex/common_config.go; DO NOT EDIT MANUALLY -->

- `folder_id` (string) - The folder ID that will be used to launch instances and store images.
  Alternatively, you may set the value via the environment variable `YC_FOLDER_ID`.
  To use a different folder for looking up the source image or saving the target image,
  check the options 'source_image_folder_id' and 'target_image_folder_id'.

<!-- End of code generated from the comments of the CloudConfig struct in builder/yandex/common_config.go; -->
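For illustration, a fragment showing folder selection; all IDs below are placeholders, not real defaults:

```hcl
# Fragment only: where instances run and where images are stored.
source "yandex" "example" {
  folder_id              = "b1gexamplefolderid"  # or export YC_FOLDER_ID
  source_image_folder_id = "b1gotherfolderid"    # look up the base image elsewhere
  target_image_folder_id = "b1gtargetfolderid"   # save the built image elsewhere
  # ...
}
```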
@@ -1,8 +0,0 @@

<!-- Code generated from the comments of the CommonConfig struct in builder/yandex/common_config.go; DO NOT EDIT MANUALLY -->

- `serial_log_file` (string) - File path to save serial port output of the launched instance.

- `state_timeout` (duration string | ex: "1h5m2s") - The time to wait for instance state changes.
  Defaults to `5m`.

<!-- End of code generated from the comments of the CommonConfig struct in builder/yandex/common_config.go; -->
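A fragment exercising these two options (values are illustrative, not defaults):

```hcl
# Fragment only: capture the serial console and extend the state timeout.
source "yandex" "example" {
  serial_log_file = "serial.log"
  state_timeout   = "10m"  # duration string; the default is 5m
  # ...
}
```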
@@ -1,8 +0,0 @@

<!-- Code generated from the comments of the Config struct in builder/yandex/config.go; DO NOT EDIT MANUALLY -->

- `service_account_id` (string) - Service account identifier to assign to the instance.

- `target_image_folder_id` (string) - The ID of the folder in which to save the built image.
  This defaults to the value of 'folder_id'.

<!-- End of code generated from the comments of the Config struct in builder/yandex/config.go; -->
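A fragment with these two options (the IDs are placeholders):

```hcl
# Fragment only: instance service account and target folder for the image.
source "yandex" "example" {
  service_account_id     = "ajeexampleserviceacct"
  target_image_folder_id = "b1gexamplefolderid"  # falls back to folder_id when unset
  # ...
}
```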
@@ -1,5 +0,0 @@

<!-- Code generated from the comments of the Config struct in builder/yandex/config.go; DO NOT EDIT MANUALLY -->

- `source_image_family` (string) - The source image family to create the new image
  from. You can also specify source_image_id instead. Just one of source_image_id or
  source_image_family must be specified. Example: `ubuntu-1804-lts`.
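A sketch of the two mutually exclusive ways to choose the base image (the image ID is a placeholder):

```hcl
# Either select the base image by family...
source "yandex" "by_family" {
  source_image_family = "ubuntu-1804-lts"
  # ...
}

# ...or pin a specific image by ID.
source "yandex" "by_id" {
  source_image_id = "fd8exampleimageid"  # placeholder
  # ...
}
```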
@@ -1,12 +0,0 @@

<!-- Code generated from the comments of the DiskConfig struct in builder/yandex/common_config.go; DO NOT EDIT MANUALLY -->

- `disk_name` (string) - The name of the disk; if unset, the instance name
  will be used.

- `disk_size_gb` (int) - The size of the disk in GB. This defaults to 10/100GB.

- `disk_type` (string) - Specify the disk type for the launched instance. Defaults to `network-ssd`.

- `disk_labels` (map[string]string) - Key/value pair labels to apply to the disk.

<!-- End of code generated from the comments of the DiskConfig struct in builder/yandex/common_config.go; -->
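A fragment with the disk options (values are illustrative):

```hcl
# Fragment only: boot disk settings for the build instance.
source "yandex" "example" {
  disk_name    = "packer-build-disk"
  disk_size_gb = 20
  disk_type    = "network-hdd"  # the default is network-ssd
  disk_labels = {
    builder = "packer"
  }
  # ...
}
```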
Some files were not shown because too many files have changed in this diff.