From 31b66a63b16b2b7f690ddbd0bb0be75146ce05e1 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Tue, 28 May 2019 15:24:58 +0200
Subject: [PATCH 01/97] scrape builder docs from https://www.packer.io/docs/
in order to get what is required and what is not.
---
.gitignore | 1 +
cmd/doc-required-scraper/main.go | 42 ++++++++++++++++++++++++++++++++
2 files changed, 43 insertions(+)
create mode 100644 cmd/doc-required-scraper/main.go
diff --git a/.gitignore b/.gitignore
index cdf065362..3d73b6ec6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,4 @@ packer-test*.log
Thumbs.db
/packer.exe
.project
+cache
diff --git a/cmd/doc-required-scraper/main.go b/cmd/doc-required-scraper/main.go
new file mode 100644
index 000000000..a982fd263
--- /dev/null
+++ b/cmd/doc-required-scraper/main.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/gocolly/colly"
+)
+
+const (
+ DocsUrl = "https://www.packer.io/docs/"
+ CacheDir = "cache/"
+)
+
+func main() {
+ c := colly.NewCollector()
+
+ // Find and visit all doc pages
+ c.OnHTML("a[href]", func(e *colly.HTMLElement) {
+ url := e.Attr("href")
+ if !strings.HasPrefix(url, "/docs/builders") {
+ return
+ }
+ e.Request.Visit(url)
+ })
+
+ c.OnHTML("#required- + ul a[name]", func(e *colly.HTMLElement) {
+
+ builder := e.Request.URL.Path[strings.Index(e.Request.URL.Path, "/builders/")+len("/builders/"):]
+ builder = strings.TrimSuffix(builder, ".html")
+
+ text := e.DOM.Parent().Text()
+ text = strings.ReplaceAll(text, "\n", "")
+ text = strings.TrimSpace(text)
+
+ fmt.Printf("required: %25s builder: %20s text: %s\n", e.Attr("name"), builder, text)
+ })
+
+ c.CacheDir = CacheDir
+
+ c.Visit(DocsUrl)
+}
From e71e36af3bb55ec6fdb1cdf536eab317f3300891 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Tue, 28 May 2019 15:37:50 +0200
Subject: [PATCH 02/97] Update azure.html.md
add dashes where needed
---
website/source/docs/builders/azure.html.md | 28 +++++++++++-----------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/website/source/docs/builders/azure.html.md b/website/source/docs/builders/azure.html.md
index 0ab563811..320bf9814 100644
--- a/website/source/docs/builders/azure.html.md
+++ b/website/source/docs/builders/azure.html.md
@@ -45,41 +45,41 @@ If you want to use a [service principal](/docs/builders/azure-setup.html#create-
you should specify `subscription_id`, `client_id` and one of `client_secret`,
`client_cert_path` or `client_jwt`.
-- `subscription_id` (string) Subscription under which the build will be
+- `subscription_id` (string) - Subscription under which the build will be
performed. **The service principal specified in `client_id` must have full
access to this subscription, unless build\_resource\_group\_name option is
specified in which case it needs to have owner access to the existing
resource group specified in build\_resource\_group\_name parameter.**
-- `client_id` (string) The Active Directory service principal associated with
+- `client_id` (string) - The Active Directory service principal associated with
your builder.
-- `client_secret` (string) The password or secret for your service principal.
+- `client_secret` (string) - The password or secret for your service principal.
-- `client_cert_path` (string) The location of a PEM file containing a
+- `client_cert_path` (string) - The location of a PEM file containing a
certificate and private key for service principal.
-- `client_jwt` (string) The bearer JWT assertion signed using a certificate
+- `client_jwt` (string) - The bearer JWT assertion signed using a certificate
+ associated with your service principal. See [Azure Active
Directory docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-certificate-credentials)
for more information.
### Required:
-- `image_publisher` (string) PublisherName for your base image. See
+- `image_publisher` (string) - PublisherName for your base image. See
[documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
for details.
CLI example `az vm image list-publishers --location westus`
-- `image_offer` (string) Offer for your base image. See
+- `image_offer` (string) - Offer for your base image. See
[documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
for details.
CLI example
`az vm image list-offers --location westus --publisher Canonical`
-- `image_sku` (string) SKU for your base image. See
+- `image_sku` (string) - SKU for your base image. See
[documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
for details.
@@ -93,30 +93,30 @@ creating a VHD, you **must** start with a VHD. Likewise, if you want to create
a managed image you **must** start with a managed image. When creating a VHD
the following options are required.
-- `capture_container_name` (string) Destination container name. Essentially
+- `capture_container_name` (string) - Destination container name. Essentially
the "directory" where your VHD will be organized in Azure. The captured
VHD's URL will be
`https://.blob.core.windows.net/system/Microsoft.Compute/Images//.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.vhd`.
-- `capture_name_prefix` (string) VHD prefix. The final artifacts will be
+- `capture_name_prefix` (string) - VHD prefix. The final artifacts will be
named `PREFIX-osDisk.UUID` and `PREFIX-vmTemplate.UUID`.
-- `resource_group_name` (string) Resource group under which the final
+- `resource_group_name` (string) - Resource group under which the final
artifact will be stored.
-- `storage_account` (string) Storage account under which the final artifact
+- `storage_account` (string) - Storage account under which the final artifact
will be stored.
When creating a managed image the following options are required.
-- `managed_image_name` (string) Specify the managed image name where the
+- `managed_image_name` (string) - Specify the managed image name where the
result of the Packer build will be saved. The image name must not exist
ahead of time, and will not be overwritten. If this value is set, the value
`managed_image_resource_group_name` must also be set. See
[documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
to learn more about managed images.
-- `managed_image_resource_group_name` (string) Specify the managed image
+- `managed_image_resource_group_name` (string) - Specify the managed image
resource group name where the result of the Packer build will be saved. The
resource group must already exist. If this value is set, the value
`managed_image_name` must also be set. See
From c7808aaf1f1ab745e4d4cf58df02a21aca2ed62a Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Tue, 28 May 2019 16:47:28 +0200
Subject: [PATCH 03/97] do things locally
---
cmd/doc-required-scraper/main.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/doc-required-scraper/main.go b/cmd/doc-required-scraper/main.go
index a982fd263..caef5bcd0 100644
--- a/cmd/doc-required-scraper/main.go
+++ b/cmd/doc-required-scraper/main.go
@@ -8,7 +8,7 @@ import (
)
const (
- DocsUrl = "https://www.packer.io/docs/"
+ DocsUrl = "http://127.0.0.1:4567/docs/"
CacheDir = "cache/"
)
From da5f075a3e8e6e7c59e199153c2674b231c9feef Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 11:27:54 +0200
Subject: [PATCH 04/97] Update vmware-iso.html.md.erb
fix indentation
---
.../docs/builders/vmware-iso.html.md.erb | 34 +++++++++----------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/website/source/docs/builders/vmware-iso.html.md.erb b/website/source/docs/builders/vmware-iso.html.md.erb
index 4c67d26be..feaa919fd 100644
--- a/website/source/docs/builders/vmware-iso.html.md.erb
+++ b/website/source/docs/builders/vmware-iso.html.md.erb
@@ -115,27 +115,27 @@ builder.
- `disk_type_id` (string) - The type of VMware virtual disk to create. This
option is for advanced usage.
- For desktop VMware clients:
+ For desktop VMware clients:
- Type ID | Description
- --- | ---
- `0` | Growable virtual disk contained in a single file (monolithic sparse).
- `1` | Growable virtual disk split into 2GB files (split sparse).
- `2` | Preallocated virtual disk contained in a single file (monolithic flat).
- `3` | Preallocated virtual disk split into 2GB files (split flat).
- `4` | Preallocated virtual disk compatible with ESX server (VMFS flat).
- `5` | Compressed disk optimized for streaming.
+ Type ID | Description
+ --- | ---
+ `0` | Growable virtual disk contained in a single file (monolithic sparse).
+ `1` | Growable virtual disk split into 2GB files (split sparse).
+ `2` | Preallocated virtual disk contained in a single file (monolithic flat).
+ `3` | Preallocated virtual disk split into 2GB files (split flat).
+ `4` | Preallocated virtual disk compatible with ESX server (VMFS flat).
+ `5` | Compressed disk optimized for streaming.
- The default is `1`.
+ The default is `1`.
- For ESXi, this defaults to `zeroedthick`. The available options for ESXi
- are: `zeroedthick`, `eagerzeroedthick`, `thin`. `rdm:dev`, `rdmp:dev`,
- `2gbsparse` are not supported. Due to default disk compaction, when using
- `zeroedthick` or `eagerzeroedthick` set `skip_compaction` to `true`.
+ For ESXi, this defaults to `zeroedthick`. The available options for ESXi
+ are: `zeroedthick`, `eagerzeroedthick`, `thin`. `rdm:dev`, `rdmp:dev`,
+ `2gbsparse` are not supported. Due to default disk compaction, when using
+ `zeroedthick` or `eagerzeroedthick` set `skip_compaction` to `true`.
- For more information, please consult the [Virtual Disk Manager User's
- Guide](https://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop
- VMware clients. For ESXi, refer to the proper ESXi documentation.
+ For more information, please consult the [Virtual Disk Manager User's
+ Guide](https://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop
+ VMware clients. For ESXi, refer to the proper ESXi documentation.
- `display_name` (string) - The name that will appear in your vSphere client,
and will be used for the vmx basename. This will override the "displayname"
From 7d52baaf0efa869a830070f9731eddc6f1a0bb7e Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 12:12:18 +0200
Subject: [PATCH 05/97] Update _building_on_remote_vsphere_hypervisor.html.md
clearly mark optional fields
---
.../builders/_building_on_remote_vsphere_hypervisor.html.md | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
index 8c04d4abb..4286f80a6 100644
--- a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
+++ b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
@@ -30,12 +30,13 @@ Packer builds on; a vMotion event will cause the Packer build to fail.
To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in
the required `remote_*` configurations:
+### Required:
+
- `remote_type` - This must be set to "esx5".
- `remote_host` - The host of the remote machine.
-Additionally, there are some optional configurations that you'll likely have to
-modify as well:
+### Optional:
- `remote_port` - The SSH port of the remote machine
From 3ccf39c646d3aaff707c165673bc74b737e9df0d Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 12:19:25 +0200
Subject: [PATCH 06/97] Update qemu.html.md.erb
fix indentation
---
website/source/docs/builders/qemu.html.md.erb | 94 +++++++++----------
1 file changed, 47 insertions(+), 47 deletions(-)
diff --git a/website/source/docs/builders/qemu.html.md.erb b/website/source/docs/builders/qemu.html.md.erb
index e611dd77e..c139740bc 100644
--- a/website/source/docs/builders/qemu.html.md.erb
+++ b/website/source/docs/builders/qemu.html.md.erb
@@ -272,63 +272,63 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
switch/value pairs. Any value specified as an empty string is ignored. All
values after the switch are concatenated with no separator.
-~> **Warning:** The qemu command line allows extreme flexibility, so beware
-of conflicting arguments causing failures of your run. For instance, using
---no-acpi could break the ability to send power signal type commands (e.g.,
-shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see
-the defaults, look in the packer.log file and search for the qemu-system-x86
-command. The arguments are all printed for review.
+ ~> **Warning:** The qemu command line allows extreme flexibility, so beware
+ of conflicting arguments causing failures of your run. For instance, using
+ --no-acpi could break the ability to send power signal type commands (e.g.,
+ shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see
+ the defaults, look in the packer.log file and search for the qemu-system-x86
+ command. The arguments are all printed for review.
-The following shows a sample usage:
+ The following shows a sample usage:
-``` json
-{
- "qemuargs": [
- [ "-m", "1024M" ],
- [ "--no-acpi", "" ],
- [
- "-netdev",
- "user,id=mynet0,",
- "hostfwd=hostip:hostport-guestip:guestport",
- ""
- ],
- [ "-device", "virtio-net,netdev=mynet0" ]
- ]
-}
-```
+ ``` json
+ {
+ "qemuargs": [
+ [ "-m", "1024M" ],
+ [ "--no-acpi", "" ],
+ [
+ "-netdev",
+ "user,id=mynet0,",
+ "hostfwd=hostip:hostport-guestip:guestport",
+ ""
+ ],
+ [ "-device", "virtio-net,netdev=mynet0" ]
+ ]
+ }
+ ```
-would produce the following (not including other defaults supplied by the
-builder and not otherwise conflicting with the qemuargs):
+ would produce the following (not including other defaults supplied by the
+ builder and not otherwise conflicting with the qemuargs):
-``` text
-qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
-```
+ ``` text
+ qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0
+ ```
-~> **Windows Users:** [QEMU for Windows](https://qemu.weilnetz.de/) builds are available though an environmental variable does need
-to be set for QEMU for Windows to redirect stdout to the console instead of stdout.txt.
+ ~> **Windows Users:** [QEMU for Windows](https://qemu.weilnetz.de/) builds are available though an environmental variable does need
+ to be set for QEMU for Windows to redirect stdout to the console instead of stdout.txt.
-The following shows the environment variable that needs to be set for Windows QEMU support:
+ The following shows the environment variable that needs to be set for Windows QEMU support:
-``` text
-setx SDL_STDIO_REDIRECT=0
-```
+ ``` text
+ setx SDL_STDIO_REDIRECT=0
+ ```
-You can also use the `SSHHostPort` template variable to produce a packer
-template that can be invoked by `make` in parallel:
+ You can also use the `SSHHostPort` template variable to produce a packer
+ template that can be invoked by `make` in parallel:
-``` json
-{
- "qemuargs": [
- [ "-netdev", "user,hostfwd=tcp::{{ .SSHHostPort }}-:22,id=forward"],
- [ "-device", "virtio-net,netdev=forward,id=net0"]
- ]
-}
-```
+ ``` json
+ {
+ "qemuargs": [
+ [ "-netdev", "user,hostfwd=tcp::{{ .SSHHostPort }}-:22,id=forward"],
+ [ "-device", "virtio-net,netdev=forward,id=net0"]
+ ]
+ }
+ ```
-`make -j 3 my-awesome-packer-templates` spawns 3 packer processes, each of which
-will bind to their own SSH port as determined by each process. This will also
-work with WinRM, just change the port forward in `qemuargs` to map to WinRM's
-default port of `5985` or whatever value you have the service set to listen on.
+ `make -j 3 my-awesome-packer-templates` spawns 3 packer processes, each of which
+ will bind to their own SSH port as determined by each process. This will also
+ work with WinRM, just change the port forward in `qemuargs` to map to WinRM's
+ default port of `5985` or whatever value you have the service set to listen on.
- `use_backing_file` (boolean) - Only applicable when `disk_image` is `true`
and `format` is `qcow2`, set this option to `true` to create a new QCOW2
From a8c8d07727df0c4cb6464731084293bd58915901 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:10:00 +0200
Subject: [PATCH 07/97] Update amazon-chroot.html.md.erb
use content of partial to be able to autodoc :D
---
website/source/docs/builders/amazon-chroot.html.md.erb | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/builders/amazon-chroot.html.md.erb b/website/source/docs/builders/amazon-chroot.html.md.erb
index 14bc59195..0139762d8 100644
--- a/website/source/docs/builders/amazon-chroot.html.md.erb
+++ b/website/source/docs/builders/amazon-chroot.html.md.erb
@@ -159,7 +159,13 @@ each category, the available configuration keys are alphabetized.
- `insecure_skip_tls_verify` (boolean) - This allows skipping TLS
verification of the AWS EC2 endpoint. The default is `false`.
-<%= partial "partials/builders/aws-common-opional-fields" %>
+- `kms_key_id` (string) - ID, alias or ARN of the KMS key to use for boot
+ volume encryption. This only applies to the main `region`, other regions
+ where the AMI will be copied will be encrypted by the default EBS KMS key.
+ For valid formats see *KmsKeyId* in the [AWS API docs -
+ CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+ This field is validated by Packer, when using an alias, you will have to
+ prefix `kms_key_id` with `alias/`.
- `from_scratch` (boolean) - Build a new volume instead of starting from an
existing AMI root volume snapshot. Default `false`. If `true`, `source_ami`
From 2c64a3f4df22523690ae402daafc6ff822327fe0 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:12:16 +0200
Subject: [PATCH 08/97] source_ami_filter: fix doc indentation
---
.../docs/builders/amazon-chroot.html.md.erb | 28 ++++++++---------
.../docs/builders/amazon-ebs.html.md.erb | 24 +++++++--------
.../builders/amazon-ebssurrogate.html.md.erb | 30 +++++++++----------
.../docs/builders/amazon-instance.html.md.erb | 30 +++++++++----------
4 files changed, 56 insertions(+), 56 deletions(-)
diff --git a/website/source/docs/builders/amazon-chroot.html.md.erb b/website/source/docs/builders/amazon-chroot.html.md.erb
index 0139762d8..a7e26379f 100644
--- a/website/source/docs/builders/amazon-chroot.html.md.erb
+++ b/website/source/docs/builders/amazon-chroot.html.md.erb
@@ -290,21 +290,21 @@ each category, the available configuration keys are alphabetized.
- `source_ami_filter` (object) - Filters used to populate the `source_ami`
field. Example:
- ``` json
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- ```
+ ``` json
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
+ ```
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
- `filters` (map of strings) - filters used to select a `source_ami`.
NOTE: This will fail unless *exactly* one AMI is returned. Any filter
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index 27b40f803..847e2a64b 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -313,19 +313,19 @@ builder.
- `source_ami_filter` (object) - Filters used to populate the `source_ami`
field. Example:
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
+ ``` json
+ {
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
}
- }
- ```
+ ```
This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
This will fail unless *exactly* one AMI is returned. In the above example,
diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
index cc1f7fcdb..790de795b 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
@@ -314,23 +314,23 @@ builder.
- `source_ami_filter` (object) - Filters used to populate the `source_ami`
field. Example:
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
+ ``` json
+ {
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
}
- }
- ```
+ ```
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
- `filters` (map of strings) - filters used to select a `source_ami`.
NOTE: This will fail unless *exactly* one AMI is returned. Any filter
diff --git a/website/source/docs/builders/amazon-instance.html.md.erb b/website/source/docs/builders/amazon-instance.html.md.erb
index 7c8048a32..b883be897 100644
--- a/website/source/docs/builders/amazon-instance.html.md.erb
+++ b/website/source/docs/builders/amazon-instance.html.md.erb
@@ -301,23 +301,23 @@ builder.
- `source_ami_filter` (object) - Filters used to populate the `source_ami`
field. Example:
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
+ ``` json
+ {
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
}
- }
- ```
+ ```
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
- `filters` (map of strings) - filters used to select a `source_ami`.
NOTE: This will fail unless *exactly* one AMI is returned. Any filter
From 50b728b6c737c1a6c99f73df46017ed944b9eb5a Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:16:51 +0200
Subject: [PATCH 09/97] doc hack
---
website/source/docs/builders/vmware-vmx.html.md.erb | 3 +++
1 file changed, 3 insertions(+)
diff --git a/website/source/docs/builders/vmware-vmx.html.md.erb b/website/source/docs/builders/vmware-vmx.html.md.erb
index 362a0bcc8..c283b10b8 100644
--- a/website/source/docs/builders/vmware-vmx.html.md.erb
+++ b/website/source/docs/builders/vmware-vmx.html.md.erb
@@ -59,6 +59,9 @@ builder.
- `source_path` (string) - Path to the source VMX file to clone. If
`remote_type` is enabled then this specifies a path on the `remote_host`.
+- `nopenopnopnopnponpo` (array of strings) - This is here so that the
+ previous item is correctly set.
+
### Optional:
- `boot_command` (array of strings) - This is an array of commands to type
From 105d11f2f19ffd7bca66252630a6f00668d3f1b3 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:29:53 +0200
Subject: [PATCH 10/97] Update _building_on_remote_vsphere_hypervisor.html.md
add types
---
...lding_on_remote_vsphere_hypervisor.html.md | 22 +++++++++----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
index 4286f80a6..c87429297 100644
--- a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
+++ b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
@@ -32,36 +32,36 @@ the required `remote_*` configurations:
### Required:
-- `remote_type` - This must be set to "esx5".
+- `remote_type` (string) - This must be set to "esx5".
-- `remote_host` - The host of the remote machine.
+- `remote_host` (string) - The host of the remote machine.
### Optional:
-- `remote_port` - The SSH port of the remote machine
+- `remote_port` (int) - The SSH port of the remote machine
-- `remote_datastore` - The path to the datastore where the VM will be stored
+- `remote_datastore` (string) - The path to the datastore where the VM will be stored
on the ESXi machine.
-- `remote_cache_datastore` - The path to the datastore where supporting files
+- `remote_cache_datastore` (string) - The path to the datastore where supporting files
will be stored during the build on the remote machine.
-- `remote_cache_directory` - The path where the ISO and/or floppy files will
+- `remote_cache_directory` (string) - The path where the ISO and/or floppy files will
be stored during the build on the remote machine. The path is relative to
the `remote_cache_datastore` on the remote machine.
-- `remote_username` - The SSH username used to access the remote machine.
+- `remote_username` (string) - The SSH username used to access the remote machine.
-- `remote_password` - The SSH password for access to the remote machine.
+- `remote_password` (string) - The SSH password for access to the remote machine.
-- `remote_private_key_file` - The SSH key for access to the remote machine.
+- `remote_private_key_file` (string) - The SSH key for access to the remote machine.
-- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output
+- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output
format of the exported virtual machine. This defaults to "ovf".
Before using this option, you need to install `ovftool`. This option
currently only works when option remote_type is set to "esx5".
Since ovftool is only capable of password based authentication
`remote_password` must be set when exporting the VM.
-- `vnc_disable_password` - This must be set to "true" when using VNC with
+- `vnc_disable_password` (boolean) - This must be set to "true" when using VNC with
ESXi 6.5 or 6.7.
\ No newline at end of file
From 17a36b3f4d5a831127e3acbc312f04b845e16a18 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:40:55 +0200
Subject: [PATCH 11/97] Update amazon-ebs.html.md.erb
unpartialise spot docs so that autogen works
---
.../docs/builders/amazon-ebs.html.md.erb | 27 ++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index 847e2a64b..24ca3a20d 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -352,7 +352,32 @@ builder.
criteria provided in `source_ami_filter`; this pins the AMI returned by the
filter, but will cause Packer to fail if the `source_ami` does not exist.
-<%= partial "partials/builders/aws-spot-docs" %>
+- `spot_instance_types` (array of strings) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of `spot_price` and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to `auto` for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if `spot_price` is set to
+ `auto`. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
+ `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
+ `Windows (Amazon VPC)`
+
+- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
+ set. This tells Packer to apply tags to the spot request that is issued.
- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
From ac9c67909ab9f35ba934d36bb6127618fb950cb8 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 15:53:06 +0200
Subject: [PATCH 12/97] simplify vnc_disable_password docs
---
website/source/docs/builders/vmware-vmx.html.md.erb | 3 ---
.../builders/_building_on_remote_vsphere_hypervisor.html.md | 3 ---
2 files changed, 6 deletions(-)
diff --git a/website/source/docs/builders/vmware-vmx.html.md.erb b/website/source/docs/builders/vmware-vmx.html.md.erb
index c283b10b8..b73d2a0e2 100644
--- a/website/source/docs/builders/vmware-vmx.html.md.erb
+++ b/website/source/docs/builders/vmware-vmx.html.md.erb
@@ -262,9 +262,6 @@ builder.
binded to for VNC. By default packer will use `127.0.0.1` for this. If you
wish to bind to all interfaces use `0.0.0.0`.
-- `vnc_disable_password` (boolean) - Don't auto-generate a VNC password that
- is used to secure the VNC communication with the VM.
-
- `vnc_port_min` and `vnc_port_max` (number) - The minimum and maximum port
to use for VNC access to the virtual machine. The builder uses VNC to type
the initial `boot_command`. Because Packer generally runs in parallel,
diff --git a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
index c87429297..4725661cd 100644
--- a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
+++ b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
@@ -62,6 +62,3 @@ the required `remote_*` configurations:
currently only works when option remote_type is set to "esx5".
Since ovftool is only capable of password based authentication
`remote_password` must be set when exporting the VM.
-
-- `vnc_disable_password` (boolean) - This must be set to "true" when using VNC with
- ESXi 6.5 or 6.7.
\ No newline at end of file
From f667cdddf8e12ba71f292196e8a389d1e6306b05 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 16:05:27 +0200
Subject: [PATCH 13/97] Update amazon-instance.html.md.erb
indent docs
---
.../docs/builders/amazon-instance.html.md.erb | 36 +++++++++----------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/website/source/docs/builders/amazon-instance.html.md.erb b/website/source/docs/builders/amazon-instance.html.md.erb
index b883be897..d4894de4b 100644
--- a/website/source/docs/builders/amazon-instance.html.md.erb
+++ b/website/source/docs/builders/amazon-instance.html.md.erb
@@ -479,29 +479,29 @@ builder.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
}
}
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `vpc_id` take precedence over this.
+ `vpc_id` takes precedence over this.
- `x509_upload_path` (string) - The path on the remote machine where the X509
certificate will be uploaded. This path must already exist and be writable.
From 1ad677bcaedf0cf9e064e9a6b5d45f1a97fa8b28 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 16:13:41 +0200
Subject: [PATCH 14/97] aws docs indent
---
.../docs/builders/amazon-chroot.html.md.erb | 24 +++++------
.../docs/builders/amazon-ebs.html.md.erb | 40 +++++++++----------
2 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/website/source/docs/builders/amazon-chroot.html.md.erb b/website/source/docs/builders/amazon-chroot.html.md.erb
index a7e26379f..25d958956 100644
--- a/website/source/docs/builders/amazon-chroot.html.md.erb
+++ b/website/source/docs/builders/amazon-chroot.html.md.erb
@@ -306,20 +306,20 @@ each category, the available configuration keys are alphabetized.
This will fail unless *exactly* one AMI is returned. In the above example,
`most_recent` will cause this to succeed by selecting the newest image.
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `source_ami`.
+ NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ described in the docs for
+ [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ is valid.
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, "amazon", "aws-marketplace", or "microsoft". This
- option is required for security reasons.
+ - `owners` (array of strings) - Filters the images by their owner. You
+ may specify one or more AWS account IDs, "self" (which will use the
+ account whose credentials you are using to run Packer), or an AWS owner
+ alias: for example, "amazon", "aws-marketplace", or "microsoft". This
+ option is required for security reasons.
- - `most_recent` (boolean) - Selects the newest created image when `true`.
- This is most useful for selecting a daily distro build.
+ - `most_recent` (boolean) - Selects the newest created image when `true`.
+ This is most useful for selecting a daily distro build.
You may set this in place of `source_ami` or in conjunction with it. If you
set this in conjunction with `source_ami`, the `source_ami` will be added
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index 24ca3a20d..12a64ad69 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -327,30 +327,30 @@ builder.
}
```
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `source_ami`.
+ NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ described in the docs for
+ [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ is valid.
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
- option is required for security reasons.
+ - `owners` (array of strings) - Filters the images by their owner. You
+ may specify one or more AWS account IDs, "self" (which will use the
+ account whose credentials you are using to run Packer), or an AWS owner
+ alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
+ option is required for security reasons.
- - `most_recent` (boolean) - Selects the newest created image when true.
- This is most useful for selecting a daily distro build.
+ - `most_recent` (boolean) - Selects the newest created image when true.
+ This is most useful for selecting a daily distro build.
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
+ You may set this in place of `source_ami` or in conjunction with it. If you
+ set this in conjunction with `source_ami`, the `source_ami` will be added
+ to the filter. The provided `source_ami` must meet all of the filtering
+ criteria provided in `source_ami_filter`; this pins the AMI returned by the
+ filter, but will cause Packer to fail if the `source_ami` does not exist.
- `spot_instance_types` (array of strings) - a list of acceptable instance
types to run your build on. We will request a spot instance using the max
From 6660d40f56a347dc1233e8628a79081b66b67665 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 16:28:33 +0200
Subject: [PATCH 15/97] indent aws docs again
---
.../docs/builders/amazon-ebs.html.md.erb | 84 +++++++++----------
.../builders/amazon-ebssurrogate.html.md.erb | 36 ++++----
.../builders/amazon-ebsvolume.html.md.erb | 36 ++++----
3 files changed, 78 insertions(+), 78 deletions(-)
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index 12a64ad69..b556e8654 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -424,36 +424,36 @@ builder.
- `subnet_filter` (object) - Filters used to populate the `subnet_id` field.
Example:
- ``` json
- {
- "subnet_filter": {
- "filters": {
- "tag:Class": "build"
- },
- "most_free": true,
- "random": false
+ ``` json
+ {
+ "subnet_filter": {
+ "filters": {
+ "tag:Class": "build"
+ },
+ "most_free": true,
+ "random": false
+ }
}
- }
- ```
+ ```
- This selects the Subnet with tag `Class` with the value `build`, which has
- the most free IP addresses. NOTE: This will fail unless *exactly* one
- Subnet is returned. By using `most_free` or `random` one will be selected
- from those matching the filter.
+ This selects the Subnet with tag `Class` with the value `build`, which has
+ the most free IP addresses. NOTE: This will fail unless *exactly* one
+ Subnet is returned. By using `most_free` or `random` one will be selected
+ from those matching the filter.
- - `filters` (map of strings) - filters used to select a `subnet_id`.
- NOTE: This will fail unless *exactly* one Subnet is returned. Any
- filter described in the docs for
- [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `subnet_id`.
+ NOTE: This will fail unless *exactly* one Subnet is returned. Any
+ filter described in the docs for
+ [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
+ is valid.
- - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
- will be used if multiple Subnets matches the filter.
+ - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
+ will be used if multiple Subnets match the filter.
- - `random` (boolean) - A random Subnet will be used if multiple Subnets
- matches the filter. `most_free` have precendence over this.
+ - `random` (boolean) - A random Subnet will be used if multiple Subnets
+ match the filter. `most_free` has precedence over this.
- `subnet_id` take precedence over this.
+ `subnet_id` takes precedence over this.
- `tags` (object of key/value strings) - Tags applied to the AMI and relevant
snapshots. This is a [template engine](../templates/engine.html), see
@@ -525,29 +525,29 @@ builder.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
}
}
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `vpc_id` take precedence over this.
+ `vpc_id` takes precedence over this.
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
index 790de795b..5b10cc28f 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
@@ -492,29 +492,29 @@ builder.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
}
}
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `vpc_id` take precedence over this.
+ `vpc_id` takes precedence over this.
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md.erb b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
index c9e5d885c..2b632c84c 100644
--- a/website/source/docs/builders/amazon-ebsvolume.html.md.erb
+++ b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
@@ -439,29 +439,29 @@ builder.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
}
}
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `vpc_id` take precedence over this.
+ `vpc_id` takes precedence over this.
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
From ffa0431d569ddbe0ec7be9dc815be836938339f3 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 29 May 2019 16:39:40 +0200
Subject: [PATCH 16/97] aws unwrap spot cfg
---
.../builders/amazon-ebssurrogate.html.md.erb | 27 +++++++++-
.../builders/amazon-ebsvolume.html.md.erb | 54 ++++++++++++++++++-
.../docs/builders/amazon-instance.html.md.erb | 27 +++++++++-
3 files changed, 105 insertions(+), 3 deletions(-)
diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
index 5b10cc28f..3fe8f78d1 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
@@ -353,7 +353,32 @@ builder.
criteria provided in `source_ami_filter`; this pins the AMI returned by the
filter, but will cause Packer to fail if the `source_ami` does not exist.
-<%= partial "partials/builders/aws-spot-docs" %>
+- `spot_instance_types` (array of strings) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of `spot_price` and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to `auto` for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if `spot_price` is set to
+ `auto`. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
+ `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
+ `Windows (Amazon VPC)`
+
+- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
+ set. This tells Packer to apply tags to the spot request that is issued.
- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md.erb b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
index 2b632c84c..d2e11038a 100644
--- a/website/source/docs/builders/amazon-ebsvolume.html.md.erb
+++ b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
@@ -59,6 +59,33 @@ builder.
### Optional:
+- `spot_instance_types` (array of strings) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of `spot_price` and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to `auto` for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if `spot_price` is set to
+ `auto`. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
+ `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
+ `Windows (Amazon VPC)`
+
+- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
+ set. This tells Packer to apply tags to the spot request that is issued.
+
- `ebs_volumes` (array of block device mappings) - Add the block device
mappings to the AMI. The block device mappings allow for keys:
@@ -302,7 +329,32 @@ builder.
criteria provided in `source_ami_filter`; this pins the AMI returned by the
filter, but will cause Packer to fail if the `source_ami` does not exist.
-<%= partial "partials/builders/aws-spot-docs" %>
+- `spot_instance_types` (array of strings) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of `spot_price` and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to `auto` for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if `spot_price` is set to
+ `auto`. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
+ `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
+ `Windows (Amazon VPC)`
+
+- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
+ set. This tells Packer to apply tags to the spot request that is issued.
- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
diff --git a/website/source/docs/builders/amazon-instance.html.md.erb b/website/source/docs/builders/amazon-instance.html.md.erb
index d4894de4b..2e135b724 100644
--- a/website/source/docs/builders/amazon-instance.html.md.erb
+++ b/website/source/docs/builders/amazon-instance.html.md.erb
@@ -343,7 +343,32 @@ builder.
- `snapshot_tags` (object of key/value strings) - Tags to apply to snapshot.
They will override AMI tags if already applied to snapshot.
-<%= partial "partials/builders/aws-spot-docs" %>
+- `spot_instance_types` (array of strings) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of `spot_price` and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to `auto` for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if `spot_price` is set to
+ `auto`. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
+ `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
+ `Windows (Amazon VPC)`
+
+- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
+ set. This tells Packer to apply tags to the spot request that is issued.
- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
From 1ea77d5af3315de22b7bc16066493069f8a59e4a Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Fri, 31 May 2019 11:33:06 +0200
Subject: [PATCH 17/97] Update azure.html.md
indent docs
---
website/source/docs/builders/azure.html.md | 108 ++++++++++-----------
1 file changed, 53 insertions(+), 55 deletions(-)
diff --git a/website/source/docs/builders/azure.html.md b/website/source/docs/builders/azure.html.md
index 320bf9814..e1532d0c4 100644
--- a/website/source/docs/builders/azure.html.md
+++ b/website/source/docs/builders/azure.html.md
@@ -183,11 +183,11 @@ Providing `temp_resource_group_name` or `location` in combination with
256 characters. Tags are applied to every resource deployed by a Packer
build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
-- `cloud_environment_name` (string) One of `Public`, `China`, `Germany`, or
+- `cloud_environment_name` (string) - One of `Public`, `China`, `Germany`, or
`USGovernment`. Defaults to `Public`. Long forms such as
`USGovernmentCloud` and `AzureUSGovernmentCloud` are also supported.
-- `custom_data_file` (string) Specify a file containing custom data to inject
+- `custom_data_file` (string) - Specify a file containing custom data to inject
into the cloud-init process. The contents of the file are read and injected
into the ARM template. The custom data will be passed to cloud-init for
processing at the time of provisioning. See
@@ -195,21 +195,21 @@ Providing `temp_resource_group_name` or `location` in combination with
to learn more about custom data, and how it can be used to influence the
provisioning process.
-- `custom_managed_image_name` (string) Specify the source managed image's
+- `custom_managed_image_name` (string) - Specify the source managed image's
name to use. If this value is set, do not set image\_publisher,
image\_offer, image\_sku, or image\_version. If this value is set, the
value `custom_managed_image_resource_group_name` must also be set. See
[documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
to learn more about managed images.
-- `custom_managed_image_resource_group_name` (string) Specify the source
+- `custom_managed_image_resource_group_name` (string) - Specify the source
managed image's resource group used to use. If this value is set, do not
set image\_publisher, image\_offer, image\_sku, or image\_version. If this
value is set, the value `custom_managed_image_name` must also be set. See
[documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
to learn more about managed images.
-- `image_version` (string) Specify a specific version of an OS to boot from.
+- `image_version` (string) - Specify a specific version of an OS to boot from.
Defaults to `latest`. There may be a difference in versions available
across regions due to image synchronization latency. To ensure a consistent
version across regions set this value to one that is available in all
@@ -218,17 +218,17 @@ Providing `temp_resource_group_name` or `location` in combination with
CLI example
`az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
-- `image_url` (string) Specify a custom VHD to use. If this value is set, do
+- `image_url` (string) - Specify a custom VHD to use. If this value is set, do
not set image\_publisher, image\_offer, image\_sku, or image\_version.
-- `managed_image_storage_account_type` (string) Specify the storage account
+- `managed_image_storage_account_type` (string) - Specify the storage account
type for a managed image. Valid values are Standard\_LRS and Premium\_LRS.
The default is Standard\_LRS.
-- `os_disk_size_gb` (number) Specify the size of the OS disk in GB
+- `os_disk_size_gb` (number) - Specify the size of the OS disk in GB
(gigabytes). Values of zero or less than zero are ignored.
-- `disk_caching_type` (string) Specify the disk caching type. Valid values
+- `disk_caching_type` (string) - Specify the disk caching type. Valid values
are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
- `disk_additional_size` (array of integers) - The size(s) of any additional
@@ -249,7 +249,7 @@ Providing `temp_resource_group_name` or `location` in combination with
The additional disk will have the same storage account type as the OS disk,
as specified with the `managed_image_storage_account_type` setting.
-- `os_type` (string) If either `Linux` or `Windows` is specified Packer will
+- `os_type` (string) - If either `Linux` or `Windows` is specified Packer will
automatically configure authentication credentials for the provisioned
machine. For `Linux` this configures an SSH authorized key. For `Windows`
this configures a WinRM certificate.
@@ -260,103 +260,101 @@ Providing `temp_resource_group_name` or `location` in combination with
all Marketplace images support programmatic deployment, and support is
controlled by the image publisher.
- An example plan\_info object is defined below.
+ An example plan\_info object is defined below.
- ``` json
- {
- "plan_info": {
- "plan_name": "rabbitmq",
- "plan_product": "rabbitmq",
- "plan_publisher": "bitnami"
- }
- }
- ```
+ ``` json
+ {
+ "plan_info": {
+ "plan_name": "rabbitmq",
+ "plan_product": "rabbitmq",
+ "plan_publisher": "bitnami"
+ }
+ }
+ ```
- `plan_name` (string) - The plan name, required. `plan_product` (string) -
- The plan product, required. `plan_publisher` (string) - The plan publisher,
- required. `plan_promotion_code` (string) - Some images accept a promotion
- code, optional.
+ `plan_name` (string) - The plan name, required. `plan_product` (string) -
+ The plan product, required. `plan_publisher` (string) - The plan publisher,
+ required. `plan_promotion_code` (string) - Some images accept a promotion
+ code, optional.
- Images created from the Marketplace with `plan_info` **must** specify
- `plan_info` whenever the image is deployed. The builder automatically adds
- tags to the image to ensure this information is not lost. The following
- tags are added.
+ Images created from the Marketplace with `plan_info` **must** specify
+ `plan_info` whenever the image is deployed. The builder automatically adds
+ tags to the image to ensure this information is not lost. The following
+ tags are added.
- 1. PlanName
- 2. PlanProduct
- 3. PlanPublisher
- 4. PlanPromotionCode
+ 1. PlanName
+ 2. PlanProduct
+ 3. PlanPublisher
+ 4. PlanPromotionCode
-- `shared_image_gallery` (object) Use a [Shared Gallery
+- `shared_image_gallery` (object) - Use a [Shared Gallery
image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
as the source for this build. *VHD targets are incompatible with this build
type* - the target must be a *Managed Image*.
-
+ "shared_image_gallery": {
+ "subscription": "00000000-0000-0000-0000-00000000000",
+ "resource_group": "ResourceGroup",
+ "gallery_name": "GalleryName",
+ "image_name": "ImageName",
+ "image_version": "1.0.0"
+ }
+ "managed_image_name": "TargetImageName",
+ "managed_image_resource_group_name": "TargetResourceGroup"
- "shared_image_gallery": {
- "subscription": "00000000-0000-0000-0000-00000000000",
- "resource_group": "ResourceGroup",
- "gallery_name": "GalleryName",
- "image_name": "ImageName",
- "image_version": "1.0.0"
- }
- "managed_image_name": "TargetImageName",
- "managed_image_resource_group_name": "TargetResourceGroup"
-
-- `temp_compute_name` (string) temporary name assigned to the VM. If this
+- `temp_compute_name` (string) - temporary name assigned to the VM. If this
value is not set, a random value will be assigned. Knowing the resource
group and VM name allows one to execute commands to update the VM during a
Packer build, e.g. attach a resource disk to the VM.
-- `tenant_id` (string) The account identifier with which your `client_id` and
+- `tenant_id` (string) - The account identifier with which your `client_id` and
`subscription_id` are associated. If not specified, `tenant_id` will be
looked up using `subscription_id`.
-- `private_virtual_network_with_public_ip` (boolean) This value allows you to
+- `private_virtual_network_with_public_ip` (boolean) - This value allows you to
set a `virtual_network_name` and obtain a public IP. If this value is not
set and `virtual_network_name` is defined Packer is only allowed to be
executed from a host on the same subnet / virtual network.
-- `virtual_network_name` (string) Use a pre-existing virtual network for the
+- `virtual_network_name` (string) - Use a pre-existing virtual network for the
VM. This option enables private communication with the VM, no public IP
address is **used** or **provisioned** (unless you set
`private_virtual_network_with_public_ip`).
-- `virtual_network_resource_group_name` (string) If virtual\_network\_name is
+- `virtual_network_resource_group_name` (string) - If virtual\_network\_name is
set, this value **may** also be set. If virtual\_network\_name is set, and
this value is not set the builder attempts to determine the resource group
containing the virtual network. If the resource group cannot be found, or
it cannot be disambiguated, this value should be set.
-- `virtual_network_subnet_name` (string) If virtual\_network\_name is set,
+- `virtual_network_subnet_name` (string) - If virtual\_network\_name is set,
this value **may** also be set. If virtual\_network\_name is set, and this
value is not set the builder attempts to determine the subnet to use with
the virtual network. If the subnet cannot be found, or it cannot be
disambiguated, this value should be set.
-- `vm_size` (string) Size of the VM used for building. This can be changed
+- `vm_size` (string) - Size of the VM used for building. This can be changed
when you deploy a VM from your VHD. See
[pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
information. Defaults to `Standard_A1`.
CLI example `az vm list-sizes --location westus`
-- `async_resourcegroup_delete` (boolean) If you want packer to delete the
+- `async_resourcegroup_delete` (boolean) - If you want packer to delete the
temporary resource group asynchronously set this value. It's a boolean
value and defaults to false. **Important** Setting this true means that
your builds are faster, however any failed deletes are not reported.
-- `managed_image_os_disk_snapshot_name` (string) If
+- `managed_image_os_disk_snapshot_name` (string) - If
managed\_image\_os\_disk\_snapshot\_name is set, a snapshot of the OS disk
is created with the same name as this value before the VM is captured.
-- `managed_image_data_disk_snapshot_prefix` (string) If
+- `managed_image_data_disk_snapshot_prefix` (string) - If
managed\_image\_data\_disk\_snapshot\_prefix is set, snapshot of the data
disk(s) is created with the same prefix as this value before the VM is
captured.
-- `managed_image_zone_resilient` (bool) Store the image in zone-resilient storage. You need to create it
+- `managed_image_zone_resilient` (bool) - Store the image in zone-resilient storage. You need to create it
in a region that supports [availability zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
## Basic Example
From b00412d0553731106bc2f40f250b5df54bd5f42d Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Fri, 31 May 2019 12:26:39 +0200
Subject: [PATCH 18/97] Update alicloud-ecs.html.md
indent
---
website/source/docs/builders/alicloud-ecs.html.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/website/source/docs/builders/alicloud-ecs.html.md b/website/source/docs/builders/alicloud-ecs.html.md
index 22839d032..ff7e4b0d1 100644
--- a/website/source/docs/builders/alicloud-ecs.html.md
+++ b/website/source/docs/builders/alicloud-ecs.html.md
@@ -87,13 +87,13 @@ builder.
disk.
- `disk_category` (string) - Category of the system disk. Optional values
are:
- - `cloud` - general cloud disk
- - `cloud_efficiency` - efficiency cloud disk
- - `cloud_ssd` - cloud SSD
+ - `cloud` - general cloud disk
+ - `cloud_efficiency` - efficiency cloud disk
+ - `cloud_ssd` - cloud SSD
- For phased-out instance types and non-I/O optimized instances, the
- default value is cloud. Otherwise, the default value is
- cloud\_efficiency.
+ For phased-out instance types and non-I/O optimized instances, the
+ default value is cloud. Otherwise, the default value is
+ cloud\_efficiency.
- `disk_description` (string) - The value of disk description is blank by
default. \[2, 256\] characters. The disk description will appear on the
From feafa8b18b67bb9a7632fe0b742cc23f58e58ffe Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Fri, 31 May 2019 12:27:06 +0200
Subject: [PATCH 19/97] make sure the vpc_filter field is exported correctly
---
.../docs/builders/amazon-ebs.html.md.erb | 26 -------------
.../builders/amazon-ebssurrogate.html.md.erb | 27 -------------
.../builders/amazon-ebsvolume.html.md.erb | 38 +++++++++---------
.../docs/builders/amazon-instance.html.md.erb | 39 +++++++++----------
4 files changed, 38 insertions(+), 92 deletions(-)
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index b556e8654..9ec0c496e 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -522,32 +522,6 @@ builder.
`subnet_id` to be set. If this field is left blank, Packer will try to get
the VPC ID from the `subnet_id`.
-- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
- Example:
-
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
- }
- }
- }
- ```
-
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
-
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
-
- `vpc_id` take precedence over this.
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
index 3fe8f78d1..969c1e5c3 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
@@ -514,33 +514,6 @@ builder.
`subnet_id` to be set. If this field is left blank, Packer will try to get
the VPC ID from the `subnet_id`.
-- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
- Example:
-
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
- }
- }
- }
- ```
-
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
-
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
-
- `vpc_id` take precedence over this.
-
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
`10m`
diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md.erb b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
index d2e11038a..43ac0cde3 100644
--- a/website/source/docs/builders/amazon-ebsvolume.html.md.erb
+++ b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
@@ -489,31 +489,31 @@ builder.
the VPC ID from the `subnet_id`.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
+ `vpc_id` takes precedence over this.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
+ }
}
- }
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `vpc_id` take precedence over this.
- `windows_password_timeout` (string) - The timeout for waiting for a Windows
password for Windows instances. Defaults to 20 minutes. Example value:
diff --git a/website/source/docs/builders/amazon-instance.html.md.erb b/website/source/docs/builders/amazon-instance.html.md.erb
index 2e135b724..e19912e7c 100644
--- a/website/source/docs/builders/amazon-instance.html.md.erb
+++ b/website/source/docs/builders/amazon-instance.html.md.erb
@@ -502,31 +502,30 @@ builder.
the VPC ID from the `subnet_id`.
- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
+ `vpc_id` takes precedence over this.
Example:
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
+ ``` json
+ {
+ "vpc_filter": {
+ "filters": {
+ "tag:Class": "build",
+ "isDefault": "false",
+ "cidr": "/24"
+ }
+ }
}
- }
- }
- ```
+ ```
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
+ This selects the VPC with tag `Class` with the value `build`, which is not
+ the default VPC, and have a IPv4 CIDR block of `/24`. NOTE: This will fail
+ unless *exactly* one VPC is returned.
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
-
- `vpc_id` take precedence over this.
+ - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
+ This will fail unless *exactly* one VPC is returned. Any filter
+ described in the docs for
+ [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
+ is valid.
- `x509_upload_path` (string) - The path on the remote machine where the X509
certificate will be uploaded. This path must already exist and be writable.
From f1917edd344c4cbbbd3e60cf73b48331833263de Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Tue, 28 May 2019 17:50:58 +0200
Subject: [PATCH 20/97] generate the comments for config struct of builders
scraping doc website
---
builder/alicloud/ecs/access_config.go | 23 ++-
builder/alicloud/ecs/image_config.go | 119 +++++++++---
builder/alicloud/ecs/run_config.go | 105 ++++++++--
builder/amazon/chroot/builder.go | 113 +++++++++--
builder/amazon/common/access_config.go | 69 +++++--
builder/amazon/common/ami_config.go | 120 ++++++++++--
builder/amazon/common/block_device.go | 77 ++++++--
builder/amazon/common/run_config.go | 170 ++++++++++++++---
builder/amazon/ebssurrogate/builder.go | 10 +-
.../amazon/ebssurrogate/root_block_device.go | 28 ++-
builder/amazon/ebsvolume/block_device.go | 5 +-
builder/amazon/ebsvolume/builder.go | 24 ++-
builder/amazon/instance/builder.go | 48 ++++-
builder/azure/arm/clientconfig.go | 12 +-
builder/azure/arm/config.go | 178 +++++++++++++----
builder/cloudstack/config.go | 162 ++++++++++++----
builder/digitalocean/config.go | 73 +++++--
builder/docker/config.go | 59 +++++-
builder/docker/ecr_login.go | 21 +-
builder/googlecompute/config.go | 171 +++++++++++++----
builder/hyperone/config.go | 91 ++++++---
builder/hyperv/common/output_config.go | 9 +-
builder/hyperv/common/shutdown_config.go | 16 +-
builder/hyperv/iso/builder.go | 180 ++++++++++++------
builder/hyperv/vmcx/builder.go | 158 ++++++++++-----
builder/lxc/config.go | 58 ++++--
builder/lxd/config.go | 28 ++-
builder/ncloud/config.go | 43 ++++-
builder/openstack/access_config.go | 83 ++++++--
builder/openstack/image_config.go | 24 ++-
builder/openstack/run_config.go | 139 ++++++++++----
builder/parallels/common/hw_config.go | 20 +-
builder/parallels/common/output_config.go | 8 +-
builder/parallels/common/prlctl_config.go | 12 +-
.../parallels/common/prlctl_post_config.go | 5 +-
.../parallels/common/prlctl_version_config.go | 7 +-
builder/parallels/common/shutdown_config.go | 11 +-
builder/parallels/common/tools_config.go | 23 ++-
builder/parallels/iso/builder.go | 46 ++++-
builder/parallels/pvm/config.go | 22 ++-
builder/qemu/builder.go | 160 +++++++++++++---
builder/scaleway/config.go | 55 ++++--
builder/tencentcloud/cvm/access_config.go | 21 +-
builder/tencentcloud/cvm/image_config.go | 30 ++-
builder/tencentcloud/cvm/run_config.go | 64 +++++--
builder/triton/access_config.go | 32 +++-
builder/triton/source_machine_config.go | 62 +++++-
builder/triton/target_image_config.go | 32 +++-
builder/vagrant/builder.go | 111 ++++++++---
builder/virtualbox/common/export_config.go | 4 +-
builder/virtualbox/common/export_opts.go | 7 +-
.../common/guest_additions_config.go | 9 +-
builder/virtualbox/common/hw_config.go | 21 +-
builder/virtualbox/common/output_config.go | 8 +-
builder/virtualbox/common/run_config.go | 18 +-
builder/virtualbox/common/shutdown_config.go | 21 +-
builder/virtualbox/common/ssh_config.go | 13 +-
.../virtualbox/common/vbox_version_config.go | 8 +-
.../virtualbox/common/vboxbundle_config.go | 6 +-
.../virtualbox/common/vboxmanage_config.go | 12 +-
.../common/vboxmanage_post_config.go | 5 +-
builder/virtualbox/iso/builder.go | 91 +++++++--
builder/virtualbox/ovf/config.go | 86 +++++++--
builder/vmware/common/driver_config.go | 47 +++--
builder/vmware/common/export_config.go | 44 ++++-
builder/vmware/common/hw_config.go | 53 ++++--
builder/vmware/common/output_config.go | 8 +-
builder/vmware/common/run_config.go | 27 ++-
builder/vmware/common/shutdown_config.go | 11 +-
builder/vmware/common/tools_config.go | 14 +-
builder/vmware/common/vmx_config.go | 25 ++-
builder/vmware/iso/config.go | 79 ++++++--
builder/vmware/vmx/config.go | 21 +-
builder/yandex/config.go | 105 +++++++---
cmd/doc-required-scraper/main.go | 63 +++++-
75 files changed, 3097 insertions(+), 846 deletions(-)
diff --git a/builder/alicloud/ecs/access_config.go b/builder/alicloud/ecs/access_config.go
index e0ace80ea..acc789d31 100644
--- a/builder/alicloud/ecs/access_config.go
+++ b/builder/alicloud/ecs/access_config.go
@@ -12,11 +12,24 @@ import (
// Config of alicloud
type AlicloudAccessConfig struct {
- AlicloudAccessKey string `mapstructure:"access_key"`
- AlicloudSecretKey string `mapstructure:"secret_key"`
- AlicloudRegion string `mapstructure:"region"`
- AlicloudSkipValidation bool `mapstructure:"skip_region_validation"`
- SecurityToken string `mapstructure:"security_token"`
+ // This is the Alicloud access key. It must be
+ // provided, but it can also be sourced from the ALICLOUD_ACCESS_KEY
+ // environment variable.
+ AlicloudAccessKey string `mapstructure:"access_key" required:"true"`
+ // This is the Alicloud secret key. It must be
+ // provided, but it can also be sourced from the ALICLOUD_SECRET_KEY
+ // environment variable.
+ AlicloudSecretKey string `mapstructure:"secret_key" required:"true"`
+ // This is the Alicloud region. It must be provided, but
+ // it can also be sourced from the ALICLOUD_REGION environment variables.
+ AlicloudRegion string `mapstructure:"region" required:"true"`
+ // The region validation can be skipped
+ // if this value is true, the default value is false.
+ AlicloudSkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // STS access token, can be set through template
+ // or by exporting as environment variable such as
+ // export SecurityToken=value.
+ SecurityToken string `mapstructure:"security_token" required:"false"`
client *ClientWrapper
}
diff --git a/builder/alicloud/ecs/image_config.go b/builder/alicloud/ecs/image_config.go
index 80193ff56..98cb0c834 100644
--- a/builder/alicloud/ecs/image_config.go
+++ b/builder/alicloud/ecs/image_config.go
@@ -9,36 +9,111 @@ import (
)
type AlicloudDiskDevice struct {
- DiskName string `mapstructure:"disk_name"`
- DiskCategory string `mapstructure:"disk_category"`
- DiskSize int `mapstructure:"disk_size"`
- SnapshotId string `mapstructure:"disk_snapshot_id"`
- Description string `mapstructure:"disk_description"`
- DeleteWithInstance bool `mapstructure:"disk_delete_with_instance"`
- Device string `mapstructure:"disk_device"`
- Encrypted *bool `mapstructure:"disk_encrypted"`
+ // The value of disk name is blank by default. [2,
+ // 128] English or Chinese characters, must begin with an
+ // uppercase/lowercase letter or Chinese character. Can contain numbers,
+ // ., _ and -. The disk name will appear on the console. It cannot
+ // begin with http:// or https://.
+ DiskName string `mapstructure:"disk_name" required:"false"`
+ // Category of the system disk. Optional values
+ // are:
+ // - cloud - general cloud disk
+ // - cloud_efficiency - efficiency cloud disk
+ // - cloud_ssd - cloud SSD
+ DiskCategory string `mapstructure:"disk_category" required:"false"`
+ // Size of the system disk, measured in GiB. Value
+ // range: [20, 500]. The specified value must be equal to or greater
+ // than max{20, ImageSize}. Default value: max{40, ImageSize}.
+ DiskSize int `mapstructure:"disk_size" required:"false"`
+ // Snapshots are used to create the data
+ // disk After this parameter is specified, Size is ignored. The actual
+ // size of the created disk is the size of the specified snapshot.
+ SnapshotId string `mapstructure:"disk_snapshot_id" required:"false"`
+ // The value of disk description is blank by
+ // default. [2, 256] characters. The disk description will appear on the
+ // console. It cannot begin with http:// or https://.
+ Description string `mapstructure:"disk_description" required:"false"`
+ // Whether or not the disk is
+ // released along with the instance:
+ DeleteWithInstance bool `mapstructure:"disk_delete_with_instance" required:"false"`
+ // Device information of the related instance:
+ // such as /dev/xvdb. It is null unless the Status is In_use.
+ Device string `mapstructure:"disk_device" required:"false"`
+ // Whether or not to encrypt the data disk.
+ // If this option is set to true, the data disk will be encrypted and corresponding snapshot in the target image will also be encrypted. By
+ // default, if this is an extra data disk, Packer will not encrypt the
+ // data disk. Otherwise, Packer will keep the encryption setting to what
+ // it was in the source image. Please refer to Introduction of ECS disk encryption
+ // for more details.
+ Encrypted *bool `mapstructure:"disk_encrypted" required:"false"`
}
type AlicloudDiskDevices struct {
- ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping"`
- ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings"`
+ // Image disk mapping for system
+ // disk.
+ ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false"`
+ // Add one or more data
+ // disks to the image.
+ ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings" required:"false"`
}
type AlicloudImageConfig struct {
- AlicloudImageName string `mapstructure:"image_name"`
- AlicloudImageVersion string `mapstructure:"image_version"`
- AlicloudImageDescription string `mapstructure:"image_description"`
- AlicloudImageShareAccounts []string `mapstructure:"image_share_account"`
+ // The name of the user-defined image, [2, 128]
+ // English or Chinese characters. It must begin with an uppercase/lowercase
+ // letter or a Chinese character, and may contain numbers, _ or -. It
+ // cannot begin with http:// or https://.
+ AlicloudImageName string `mapstructure:"image_name" required:"true"`
+ // The version number of the image, with a length
+ // limit of 1 to 40 English characters.
+ AlicloudImageVersion string `mapstructure:"image_version" required:"false"`
+ // The description of the image, with a length
+ // limit of 0 to 256 characters. Leaving it blank means null, which is the
+ // default value. It cannot begin with http:// or https://.
+ AlicloudImageDescription string `mapstructure:"image_description" required:"false"`
+ // The IDs of to-be-added Aliyun
+ // accounts to which the image is shared. The number of accounts is 1 to 10.
+ // If number of accounts is greater than 10, this parameter is ignored.
+ AlicloudImageShareAccounts []string `mapstructure:"image_share_account" required:"false"`
AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
- AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions"`
- AlicloudImageDestinationNames []string `mapstructure:"image_copy_names"`
- ImageEncrypted *bool `mapstructure:"image_encrypted"`
- AlicloudImageForceDelete bool `mapstructure:"image_force_delete"`
- AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots"`
+ // Copy to the destination regionIds.
+ AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false"`
+ // The name of the destination image,
+ // [2, 128] English or Chinese characters. It must begin with an
+ // uppercase/lowercase letter or a Chinese character, and may contain numbers,
+ // _ or -. It cannot begin with http:// or https://.
+ AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false"`
+ // Whether or not to encrypt the target images, including those copied if image_copy_regions is specified. If this option
+ // is set to true, a temporary image will be created from the provisioned
+ // instance in the main region and an encrypted copy will be generated in the
+ // same region. By default, Packer will keep the encryption setting to what
+ // it was in the source image.
+ ImageEncrypted *bool `mapstructure:"image_encrypted" required:"false"`
+ // If this value is true, when the target
+ // image names including those copied are duplicated with existing images, it
+ // will delete the existing images and then create the target images,
+ // otherwise, the creation will fail. The default value is false. Check
+ // image_name and image_copy_names options for names of target images. If
+ // -force option is
+ // provided in build command, this option can be omitted and taken as true.
+ AlicloudImageForceDelete bool `mapstructure:"image_force_delete" required:"false"`
+ // If this value is true, when
+ // deleting the duplicated existing images, the source snapshots of those
+ // images will be deleted as well. If
+ // -force option is
+ // provided in build command, this option can be omitted and taken as true.
+ AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots" required:"false"`
AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
- AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks"`
- AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation"`
- AlicloudImageTags map[string]string `mapstructure:"tags"`
+ // If this value is true, the image
+ // created will not include any snapshot of data disks. This option would be
+ // useful for any circumstance that default data disks with instance types are
+ // not concerned. The default value is false.
+ AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks" required:"false"`
+ // The region validation can be skipped
+ // if this value is true, the default value is false.
+ AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // Tags applied to the destination
+ // image and relevant snapshots.
+ AlicloudImageTags map[string]string `mapstructure:"tags" required:"false"`
AlicloudDiskDevices `mapstructure:",squash"`
}
diff --git a/builder/alicloud/ecs/run_config.go b/builder/alicloud/ecs/run_config.go
index e36a33854..a6d4de5a3 100644
--- a/builder/alicloud/ecs/run_config.go
+++ b/builder/alicloud/ecs/run_config.go
@@ -13,30 +13,95 @@ import (
type RunConfig struct {
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
- ZoneId string `mapstructure:"zone_id"`
- IOOptimized bool `mapstructure:"io_optimized"`
- InstanceType string `mapstructure:"instance_type"`
+ // ID of the zone to which the disk belongs.
+ ZoneId string `mapstructure:"zone_id" required:"false"`
+ // Whether an ECS instance is I/O optimized or not.
+ // The default value is false.
+ IOOptimized bool `mapstructure:"io_optimized" required:"false"`
+ // Type of the instance. For values, see Instance
+ // Type
+ // Table.
+ // You can also obtain the latest instance type table by invoking the
+ // Querying Instance Type
+ // Table
+ // interface.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
Description string `mapstructure:"description"`
- AlicloudSourceImage string `mapstructure:"source_image"`
- ForceStopInstance bool `mapstructure:"force_stop_instance"`
- DisableStopInstance bool `mapstructure:"disable_stop_instance"`
- SecurityGroupId string `mapstructure:"security_group_id"`
- SecurityGroupName string `mapstructure:"security_group_name"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- VpcId string `mapstructure:"vpc_id"`
- VpcName string `mapstructure:"vpc_name"`
- CidrBlock string `mapstructure:"vpc_cidr_block"`
- VSwitchId string `mapstructure:"vswitch_id"`
- VSwitchName string `mapstructure:"vswitch_id"`
- InstanceName string `mapstructure:"instance_name"`
- InternetChargeType string `mapstructure:"internet_charge_type"`
- InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out"`
- WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout"`
+ // This is the base image id which you want to
+ // create your customized images.
+ AlicloudSourceImage string `mapstructure:"source_image" required:"true"`
+ // Whether to force shutdown upon device
+ // restart. The default value is false.
+ ForceStopInstance bool `mapstructure:"force_stop_instance" required:"false"`
+ // If this option is set to true, Packer
+ // will not stop the instance for you, and you need to make sure the instance
+ // will be stopped in the final provisioner command. Otherwise, Packer will
+ // timeout while waiting the instance to be stopped. This option is provided
+ // for some specific scenarios that you want to stop the instance by yourself.
+ // E.g., Sysprep a windows which may shutdown the instance within its command.
+ // The default value is false.
+ DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
+ // ID of the security group to which a newly
+ // created instance belongs. Mutual access is allowed between instances in one
+ // security group. If not specified, the newly created instance will be added
+ // to the default security group. If the default group doesn’t exist, or the
+ // number of instances in it has reached the maximum limit, a new security
+ // group will be created automatically.
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ // The security group name. The default value
+ // is blank. [2, 128] English or Chinese characters, must begin with an
+ // uppercase/lowercase letter or Chinese character. Can contain numbers, .,
+ // _ or -. It cannot begin with http:// or https://.
+ SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
+ // User data to apply when launching the instance. Note
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // VPC ID allocated by the system.
+ VpcId string `mapstructure:"vpc_id" required:"false"`
+ // The VPC name. The default value is blank. [2, 128]
+ // English or Chinese characters, must begin with an uppercase/lowercase
+ // letter or Chinese character. Can contain numbers, _ and -. The disk
+ // description will appear on the console. Cannot begin with http:// or
+ // https://.
+ VpcName string `mapstructure:"vpc_name" required:"false"`
+ // Value options: 192.168.0.0/16 and
+ // 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
+ CidrBlock string `mapstructure:"vpc_cidr_block" required:"false"`
+ // The ID of the VSwitch to be used.
+ VSwitchId string `mapstructure:"vswitch_id" required:"false"`
+ // The name of the VSwitch to be used. NOTE(review): the mapstructure tag on this field duplicates "vswitch_id" (already used by VSwitchId above); it likely should be "vswitch_name" — confirm against the builder docs.
+ VSwitchName string `mapstructure:"vswitch_id" required:"false"`
+ // Display name of the instance, which is a string
+ // of 2 to 128 Chinese or English characters. It must begin with an
+ // uppercase/lowercase letter or a Chinese character and can contain numerals,
+ // ., _, or -. The instance name is displayed on the Alibaba Cloud
+ // console. If this parameter is not specified, the default value is
+ // InstanceId of the instance. It cannot begin with http:// or https://.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // Internet charge type, which can be
+ // PayByTraffic or PayByBandwidth. Optional values:
+ InternetChargeType string `mapstructure:"internet_charge_type" required:"false"`
+ // Maximum outgoing bandwidth to the
+ // public network, measured in Mbps (Mega bits per second).
+ InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out" required:"false"`
+ // Timeout of creating snapshot(s).
+ // The default timeout is 3600 seconds if this option is not set or is set
+ // to 0. For those disks containing lots of data, it may require a higher
+ // timeout value.
+ WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
- SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
+ // If this value is true, packer will connect to
+ // the ECS created through private ip instead of allocating a public ip or an
+ // EIP. The default value is false.
+ SSHPrivateIp bool `mapstructure:"ssh_private_ip" required:"false"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go
index af075b6cf..1c4a52f42 100644
--- a/builder/amazon/chroot/builder.go
+++ b/builder/amazon/chroot/builder.go
@@ -28,25 +28,100 @@ type Config struct {
awscommon.AMIBlockDevices `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.AccessConfig `mapstructure:",squash"`
-
- ChrootMounts [][]string `mapstructure:"chroot_mounts"`
- CommandWrapper string `mapstructure:"command_wrapper"`
- CopyFiles []string `mapstructure:"copy_files"`
- DevicePath string `mapstructure:"device_path"`
- NVMEDevicePath string `mapstructure:"nvme_device_path"`
- FromScratch bool `mapstructure:"from_scratch"`
- MountOptions []string `mapstructure:"mount_options"`
- MountPartition string `mapstructure:"mount_partition"`
- MountPath string `mapstructure:"mount_path"`
- PostMountCommands []string `mapstructure:"post_mount_commands"`
- PreMountCommands []string `mapstructure:"pre_mount_commands"`
- RootDeviceName string `mapstructure:"root_device_name"`
- RootVolumeSize int64 `mapstructure:"root_volume_size"`
- RootVolumeType string `mapstructure:"root_volume_type"`
- SourceAmi string `mapstructure:"source_ami"`
- SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter"`
- RootVolumeTags awscommon.TagMap `mapstructure:"root_volume_tags"`
- Architecture string `mapstructure:"ami_architecture"`
+ // This is a list of devices to
+ // mount into the chroot environment. This configuration parameter requires
+ // some additional documentation which is in the Chroot
+ // Mounts section. Please read that section for more
+ // information on how to use this.
+ ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
+ // How to run shell commands. This defaults to
+ // {{.Command}}. This may be useful to set if you want to set environmental
+ // variables or perhaps run it with sudo or so on. This is a configuration
+ // template where the .Command variable is replaced with the command to be
+ // run. Defaults to {{.Command}}.
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // Paths to files on the running EC2
+ // instance that will be copied into the chroot environment prior to
+ // provisioning. Defaults to /etc/resolv.conf so that DNS lookups work. Pass
+ // an empty list to skip copying /etc/resolv.conf. You may need to do this
+ // if you're building an image that uses systemd.
+ CopyFiles []string `mapstructure:"copy_files" required:"false"`
+ // The path to the device where the root volume of
+ // the source AMI will be attached. This defaults to "" (empty string), which
+ // forces Packer to find an open device automatically.
+ DevicePath string `mapstructure:"device_path" required:"false"`
+ // When we call the mount command (by default
+ // mount -o device dir), the string provided in nvme_mount_path will
+ // replace device in that command. When this option is not set, device in
+ // that command will be something like /dev/sdf1, mirroring the attached
+ // device name. This assumption works for most instances but will fail with c5
+ // and m5 instances. In order to use the chroot builder with c5 and m5
+ // instances, you must manually set nvme_device_path and device_path.
+ NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
+ // Build a new volume instead of starting from an
+ // existing AMI root volume snapshot. Default false. If true, source_ami
+ // is no longer used and the following options become required:
+ // ami_virtualization_type, pre_mount_commands and root_volume_size. The
+ // below options are also required in this mode only:
+ FromScratch bool `mapstructure:"from_scratch" required:"false"`
+ // Options to supply the mount command
+ // when mounting devices. Each option will be prefixed with -o and supplied
+ // to the mount command run by Packer. Because this command is run in a
+ // shell, user discretion is advised. See this manual page for the mount
+ // command for valid file
+ // system specific options.
+ MountOptions []string `mapstructure:"mount_options" required:"false"`
+ // The partition number containing the /
+ // partition. By default this is the first partition of the volume, (for
+ // example, xvda1) but you can designate the entire block device by setting
+ // "mount_partition": "0" in your config, which will mount xvda instead.
+ MountPartition string `mapstructure:"mount_partition" required:"false"`
+ // The path where the volume will be mounted. This is
+ // where the chroot environment will be. This defaults to
+ // /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration
+ // template where the .Device variable is replaced with the name of the
+ // device where the volume is attached.
+ MountPath string `mapstructure:"mount_path" required:"false"`
+ // As pre_mount_commands, but the
+ // commands are executed after mounting the root device and before the extra
+ // mount and copy steps. The device and mount path are provided by
+ // {{.Device}} and {{.MountPath}}.
+ PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
+ // A series of commands to execute
+ // after attaching the root volume and before mounting the chroot. This is not
+ // required unless using from_scratch. If so, this should include any
+ // partitioning and filesystem creation commands. The path to the device is
+ // provided by {{.Device}}.
+ PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
+ // The root device name. For example, xvda.
+ RootDeviceName string `mapstructure:"root_device_name" required:"false"`
+ // The size of the root volume in GB for the
+ // chroot environment and the resulting AMI. Default size is the snapshot size
+ // of the source_ami unless from_scratch is true, in which case this
+ // field must be defined.
+ RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
+ // The type of EBS volume for the chroot
+ // environment and resulting AMI. The default value is the type of the
+ // source_ami, unless from_scratch is true, in which case the default
+ // value is gp2. You can only specify io1 if building based on top of a
+ // source_ami which is also io1.
+ RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
+ // The source AMI whose root volume will be copied and
+ // provisioned on the currently running instance. This must be an EBS-backed
+ // AMI with a root volume snapshot that you have access to. Note: this is not
+ // used when from_scratch is set to true.
+ SourceAmi string `mapstructure:"source_ami" required:"true"`
+ // Filters used to populate the source_ami
+ // field. Example:
+ SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
+ // Tags to apply to the
+ // volumes that are launched. This is a template
+ // engine, see Build template
+ // data for more information.
+ RootVolumeTags awscommon.TagMap `mapstructure:"root_volume_tags" required:"false"`
+ // what architecture to use when registering the
+ // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+ Architecture string `mapstructure:"ami_architecture" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go
index adf0e053e..e3beac5ea 100644
--- a/builder/amazon/common/access_config.go
+++ b/builder/amazon/common/access_config.go
@@ -21,7 +21,16 @@ import (
type VaultAWSEngineOptions struct {
Name string `mapstructure:"name"`
RoleARN string `mapstructure:"role_arn"`
- TTL string `mapstructure:"ttl"`
+ // Specifies the TTL for the use of the STS token. This
+ // is specified as a string with a duration suffix. Valid only when
+ // credential_type is assumed_role or federation_token. When not
+ // specified, the default_sts_ttl set for the role will be used. If that
+ // is also not set, then the default value of 3600s will be used. AWS
+ // places limits on the maximum TTL allowed. See the AWS documentation on
+ // the DurationSeconds parameter for AssumeRole (for assumed_role
+ // credential types) and GetFederationToken (for federation_token
+ // credential types) for more details.
+ TTL string `mapstructure:"ttl" required:"false"`
EngineName string `mapstructure:"engine_name"`
}
@@ -32,19 +41,55 @@ func (v *VaultAWSEngineOptions) Empty() bool {
// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
- AccessKey string `mapstructure:"access_key"`
- CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"`
- DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages"`
- InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify"`
- MFACode string `mapstructure:"mfa_code"`
- ProfileName string `mapstructure:"profile"`
- RawRegion string `mapstructure:"region"`
- SecretKey string `mapstructure:"secret_key"`
- SkipValidation bool `mapstructure:"skip_region_validation"`
+ // The access key used to communicate with AWS. Learn
+ // how to set this
+ AccessKey string `mapstructure:"access_key" required:"true"`
+ // This option is useful if you use a cloud
+ // provider whose API is compatible with aws EC2. Specify another endpoint
+ // like this https://ec2.custom.endpoint.com.
+ CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
+ // Enable automatic decoding of
+ // any encoded authorization (error) messages using the
+ // sts:DecodeAuthorizationMessage API. Note: requires that the effective
+ // user/role have permissions to sts:DecodeAuthorizationMessage on resource
+ // *. Default false.
+ DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
+ // This allows skipping TLS
+ // verification of the AWS EC2 endpoint. The default is false.
+ InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
+ // The MFA
+ // TOTP
+ // code. This should probably be a user variable since it changes all the
+ // time.
+ MFACode string `mapstructure:"mfa_code" required:"false"`
+ // The profile to use in the shared credentials file for
+ // AWS. See Amazon's documentation on specifying
+ // profiles
+ // for more details.
+ ProfileName string `mapstructure:"profile" required:"false"`
+ // The name of the region, such as us-east-1, in which
+ // to launch the EC2 instance to create the AMI.
+ RawRegion string `mapstructure:"region" required:"true"`
+ // The secret key used to communicate with AWS. Learn
+ // how to set this
+ SecretKey string `mapstructure:"secret_key" required:"true"`
+ // Set to true if you want to skip
+ // validation of the ami_regions configuration option. Default false.
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
- Token string `mapstructure:"token"`
+ // The access token to use. This is different from the
+ // access key and secret key. If you're not sure what this is, then you
+ // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ // environmental variable.
+ Token string `mapstructure:"token" required:"false"`
session *session.Session
- VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine"`
+ // Get credentials from Hashicorp Vault's aws
+ // secrets engine. You must already have created a role to use. For more
+ // information about generating credentials via the Vault engine, see the
+ // Vault
+ // docs.
+ // If you set this flag, you must also set the below options:
+ VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
getEC2Connection func() ec2iface.EC2API
}
diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go
index 343d7f1bd..d231aac73 100644
--- a/builder/amazon/common/ami_config.go
+++ b/builder/amazon/common/ami_config.go
@@ -10,25 +10,107 @@ import (
// AMIConfig is for common configuration related to creating AMIs.
type AMIConfig struct {
- AMIName string `mapstructure:"ami_name"`
- AMIDescription string `mapstructure:"ami_description"`
- AMIVirtType string `mapstructure:"ami_virtualization_type"`
- AMIUsers []string `mapstructure:"ami_users"`
- AMIGroups []string `mapstructure:"ami_groups"`
- AMIProductCodes []string `mapstructure:"ami_product_codes"`
- AMIRegions []string `mapstructure:"ami_regions"`
- AMISkipRegionValidation bool `mapstructure:"skip_region_validation"`
- AMITags TagMap `mapstructure:"tags"`
- AMIENASupport *bool `mapstructure:"ena_support"`
- AMISriovNetSupport bool `mapstructure:"sriov_support"`
- AMIForceDeregister bool `mapstructure:"force_deregister"`
- AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot"`
- AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot"`
- AMIKmsKeyId string `mapstructure:"kms_key_id"`
- AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids"`
- SnapshotTags TagMap `mapstructure:"snapshot_tags"`
- SnapshotUsers []string `mapstructure:"snapshot_users"`
- SnapshotGroups []string `mapstructure:"snapshot_groups"`
+ // The name of the resulting AMI that will appear when
+ // managing AMIs in the AWS console or via APIs. This must be unique. To help
+ // make this unique, use a function like timestamp (see template
+ // engine for more info).
+ AMIName string `mapstructure:"ami_name" required:"true"`
+ // The description to set for the resulting
+ // AMI(s). By default this description is empty. This is a template
+ // engine, see Build template
+ // data for more information.
+ AMIDescription string `mapstructure:"ami_description" required:"false"`
+ // The type of virtualization for the AMI
+ // you are building. This option is required to register HVM images. Can be
+ // paravirtual (default) or hvm.
+ AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
+ // A list of account IDs that have access to
+ // launch the resulting AMI(s). By default no additional users other than the
+ // user creating the AMI has permissions to launch it.
+ AMIUsers []string `mapstructure:"ami_users" required:"false"`
+ // A list of groups that have access to
+ // launch the resulting AMI(s). By default no groups have permission to launch
+ // the AMI. all will make the AMI publicly accessible.
+ AMIGroups []string `mapstructure:"ami_groups" required:"false"`
+ // A list of product codes to
+ // associate with the AMI. By default no product codes are associated with the
+ // AMI.
+ AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
+ // A list of regions to copy the AMI to.
+ // Tags and attributes are copied along with the AMI. AMI copying takes time
+ // depending on the size of the AMI, but will generally take many minutes.
+ AMIRegions []string `mapstructure:"ami_regions" required:"false"`
+ // Set to true if you want to skip
+ // validation of the ami_regions configuration option. Default false.
+ AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // Tags applied to the AMI. This is a
+ // template engine, see Build template
+ // data for more information.
+ AMITags TagMap `mapstructure:"tags" required:"false"`
+ // Enable enhanced networking (ENA but not
+ // SriovNetSupport) on HVM-compatible AMIs. If set, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
+ // disable enhanced networking in the final AMI as opposed to passing the
+ // setting through unchanged from the source. Note: you must make sure
+ // enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
+ // Enable enhanced networking (SriovNetSupport but
+ // not ENA) on HVM-compatible AMIs. If true, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
+ // sure enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ // Default false.
+ AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
+ // Force Packer to first deregister an existing
+ // AMI if one with the same name already exists. Default false.
+ AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
+ // Force Packer to delete snapshots
+ // associated with AMIs, which have been deregistered by force_deregister.
+ // Default false.
+ AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
+ // Whether or not to encrypt the resulting AMI when
+ // copying a provisioned instance to an AMI. By default, Packer will keep the
+ // encryption setting to what it was in the source image. Setting false will
+ // result in an unencrypted image, and true will result in an encrypted one.
+ AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false"`
+ // ID, alias or ARN of the KMS key to use for boot
+ // volume encryption. This only applies to the main region, other regions
+ // where the AMI will be copied will be encrypted by the default EBS KMS key.
+ // For valid formats see KmsKeyId in the AWS API docs -
+ // CopyImage.
+ // This field is validated by Packer, when using an alias, you will have to
+ // prefix kms_key_id with alias/.
+ AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
+ // a map of regions to copy the ami
+ // to, along with the custom kms key id (alias or arn) to use for encryption
+ // for that region. Keys must match the regions provided in ami_regions. If
+ // you just want to encrypt using a default ID, you can stick with
+ // kms_key_id and ami_regions. If you want a region to be encrypted with
+ // that region's default key ID, you can use an empty string "" instead of a
+ // key id in this map. (e.g. "us-east-1": "") However, you cannot use
+ // default key IDs if you are using this in conjunction with snapshot_users
+ // -- in that situation you must use custom keys. For valid formats see
+ // KmsKeyId in the AWS API docs -
+ // CopyImage.
+ AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
+ // Tags to apply to snapshot.
+ // They will override AMI tags if already applied to snapshot. This is a
+ // template engine, see Build template
+ // data for more information.
+ SnapshotTags TagMap `mapstructure:"snapshot_tags" required:"false"`
+ // A list of account IDs that have
+ // access to create volumes from the snapshot(s). By default no additional
+ // users other than the user creating the AMI has permissions to create
+ // volumes from the backing snapshot(s).
+ SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
+ // A list of groups that have access to
+ // create volumes from the snapshot(s). By default no groups have permission
+ // to create volumes from the snapshot(s). all will make the snapshot
+ // publicly accessible.
+ SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
}
func stringInSlice(s []string, searchstr string) bool {
diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go
index 06fda046b..9592be58a 100644
--- a/builder/amazon/common/block_device.go
+++ b/builder/amazon/common/block_device.go
@@ -11,16 +11,52 @@ import (
// BlockDevice
type BlockDevice struct {
- DeleteOnTermination bool `mapstructure:"delete_on_termination"`
- DeviceName string `mapstructure:"device_name"`
- Encrypted *bool `mapstructure:"encrypted"`
- IOPS int64 `mapstructure:"iops"`
- NoDevice bool `mapstructure:"no_device"`
- SnapshotId string `mapstructure:"snapshot_id"`
- VirtualName string `mapstructure:"virtual_name"`
- VolumeType string `mapstructure:"volume_type"`
- VolumeSize int64 `mapstructure:"volume_size"`
- KmsKeyId string `mapstructure:"kms_key_id"`
+ // Indicates whether the EBS volume is
+ // deleted on instance termination. Default false. NOTE: If this
+ // value is not explicitly set to true and volumes are not cleaned up by
+ // an alternative method, additional volumes will accumulate after every
+ // build.
+ DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
+ // The device name exposed to the instance (for
+ // example, /dev/sdh or xvdh). Required for every device in the block
+ // device mapping.
+ DeviceName string `mapstructure:"device_name" required:"false"`
+ // Indicates whether or not to encrypt the volume.
+ // By default, Packer will keep the encryption setting to what it was in
+ // the source image. Setting false will result in an unencrypted device,
+ // and true will result in an encrypted one.
+ Encrypted *bool `mapstructure:"encrypted" required:"false"`
+ // The number of I/O operations per second (IOPS) that
+ // the volume supports. See the documentation on
+ // IOPs
+ // for more information
+ IOPS int64 `mapstructure:"iops" required:"false"`
+ // Suppresses the specified device included in the
+ // block device mapping of the AMI.
+ NoDevice bool `mapstructure:"no_device" required:"false"`
+ // The ID of the snapshot.
+ SnapshotId string `mapstructure:"snapshot_id" required:"false"`
+ // The virtual device name. See the
+ // documentation on Block Device
+ // Mapping
+ // for more information.
+ VirtualName string `mapstructure:"virtual_name" required:"false"`
+ // The volume type. gp2 for General Purpose
+ // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ // Magnetic volumes.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
+ // The size of the volume, in GiB. Required if
+ // not specifying a snapshot_id.
+ VolumeSize int64 `mapstructure:"volume_size" required:"false"`
+ // ID, alias or ARN of the KMS key to use for boot
+ // volume encryption. This only applies to the main region, other regions
+ // where the AMI will be copied will be encrypted by the default EBS KMS key.
+ // For valid formats see KmsKeyId in the AWS API docs -
+ // CopyImage.
+ // This field is validated by Packer, when using an alias, you will have to
+ // prefix kms_key_id with alias/.
+ KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
// ebssurrogate only
OmitFromArtifact bool `mapstructure:"omit_from_artifact"`
}
@@ -31,11 +67,28 @@ type BlockDevices struct {
}
type AMIBlockDevices struct {
- AMIMappings []BlockDevice `mapstructure:"ami_block_device_mappings"`
+ // Add one or
+ // more block device
+ // mappings
+ // to the AMI. These will be attached when booting a new instance from your
+ // AMI. If this field is populated, and you are building from an existing source image,
+ // the block device mappings in the source image will be overwritten. This means you
+ // must have a block device mapping entry for your root volume, root_volume_size,
+ // and root_device_name. Your options here may vary depending on the type of VM
+ // you use. The block device mappings allow for the following configuration:
+ AMIMappings []BlockDevice `mapstructure:"ami_block_device_mappings" required:"false"`
}
type LaunchBlockDevices struct {
- LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings"`
+ // Add one
+ // or more block devices before the Packer build starts. If you add instance
+ // store volumes or EBS volumes in addition to the root device volume, the
+ // created AMI will contain block device mapping information for those
+ // volumes. Amazon creates snapshots of the source instance's root volume and
+ // any other EBS volumes described here. When you launch an instance from this
+ // new AMI, the instance automatically launches with these additional volumes,
+ // and will restore them from snapshots taken from the source instance.
+ LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings" required:"false"`
}
func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping {
diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index 94b508e60..325433d54 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -58,34 +58,148 @@ func (d *SecurityGroupFilterOptions) Empty() bool {
// RunConfig contains configuration for running an instance from a source
// AMI and details on how to access that launched image.
type RunConfig struct {
- AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
- AvailabilityZone string `mapstructure:"availability_zone"`
- BlockDurationMinutes int64 `mapstructure:"block_duration_minutes"`
- DisableStopInstance bool `mapstructure:"disable_stop_instance"`
- EbsOptimized bool `mapstructure:"ebs_optimized"`
- EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited"`
- IamInstanceProfile string `mapstructure:"iam_instance_profile"`
- InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"`
- InstanceType string `mapstructure:"instance_type"`
- SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter"`
- RunTags map[string]string `mapstructure:"run_tags"`
- SecurityGroupId string `mapstructure:"security_group_id"`
- SecurityGroupIds []string `mapstructure:"security_group_ids"`
- SourceAmi string `mapstructure:"source_ami"`
- SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter"`
- SpotInstanceTypes []string `mapstructure:"spot_instance_types"`
- SpotPrice string `mapstructure:"spot_price"`
- SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product"`
- SpotTags map[string]string `mapstructure:"spot_tags"`
- SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter"`
- SubnetId string `mapstructure:"subnet_id"`
- TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
- TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- VpcFilter VpcFilterOptions `mapstructure:"vpc_filter"`
- VpcId string `mapstructure:"vpc_id"`
- WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"`
+ // If using a non-default VPC,
+ // public IP addresses are not provided by default. If this is true, your
+ // new instance will get a Public IP. default: false
+ AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
+ // Destination availability zone to launch
+ // instance in. Leave this empty to allow Amazon to auto-assign.
+ AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
+ // Requires spot_price to be set. The
+ // required duration for the Spot Instances (also known as Spot blocks). This
+ // value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
+ // specify an Availability Zone group or a launch group if you specify a
+ // duration.
+ BlockDurationMinutes int64 `mapstructure:"block_duration_minutes" required:"false"`
+ // Packer normally stops the build
+ // instance after all provisioners have run. For Windows instances, it is
+ // sometimes desirable to run
+ // Sysprep
+ // which will stop the instance for you. If this is set to true, Packer
+ // will not stop the instance but will assume that you will send the stop
+ // signal yourself through your final provisioner. You can do this with a
+ // windows-shell
+ // provisioner.
+ DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
+ // Mark instance as EBS
+ // Optimized.
+ // Default false.
+ EbsOptimized bool `mapstructure:"ebs_optimized" required:"false"`
+ // Enabling T2 Unlimited allows the source
+ // instance to burst additional CPU beyond its available CPU
+ // Credits
+ // for as long as the demand exists. This is in contrast to the standard
+ // configuration that only allows an instance to consume up to its available
+ // CPU Credits. See the AWS documentation for T2
+ // Unlimited
+ // and the T2 Unlimited Pricing section of the Amazon EC2 On-Demand
+ // Pricing document for more
+ // information. By default this option is disabled and Packer will set up a
+ // T2
+ // Standard
+ // instance instead.
+ EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
+ // The name of an IAM instance
+ // profile
+ // to launch the EC2 instance with.
+ IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
+ // Automatically terminate instances on
+ // shutdown in case Packer exits ungracefully. Possible values are stop and
+ // terminate. Defaults to stop.
+ InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior" required:"false"`
+ // The EC2 instance type to use while building the
+ // AMI, such as t2.small.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
+ // Filters used to populate the
+ // security_group_ids field. Example:
+ SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
+ // Tags to apply to the instance
+ // that is launched to create the AMI. These tags are not applied to the
+ // resulting AMI unless they're duplicated in tags. This is a template
+ // engine, see Build template
+ // data for more information.
+ RunTags map[string]string `mapstructure:"run_tags" required:"false"`
+ // The ID (not the name) of the security
+ // group to assign to the instance. By default this is not set and Packer will
+ // automatically create a new temporary security group to allow SSH access.
+ // Note that if this is specified, you must be sure the security group allows
+ // access to the ssh_port given below.
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ // A list of security groups as
+ // described above. Note that if this is specified, you must omit the
+ // security_group_id.
+ SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"`
+ // The source AMI whose root volume will be copied and
+ // provisioned on the currently running instance. This must be an EBS-backed
+ // AMI with a root volume snapshot that you have access to. Note: this is not
+ // used when from_scratch is set to true.
+ SourceAmi string `mapstructure:"source_ami" required:"true"`
+ // Filters used to populate the source_ami
+ // field. Example:
+ SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
+ // a list of acceptable instance
+ // types to run your build on. We will request a spot instance using the max
+ // price of spot_price and the allocation strategy of "lowest price".
+ // Your instance will be launched on an instance type of the lowest available
+ // price that you have in your list. This is used in place of instance_type.
+ // You may only set either spot_instance_types or instance_type, not both.
+ // This feature exists to help prevent situations where a Packer build fails
+ // because a particular availability zone does not have capacity for the
+ // specific instance_type requested in instance_type.
+ SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false"`
+ // The maximum hourly price to pay for a spot instance
+ // to create the AMI. Spot instances are a type of instance that EC2 starts
+ // when the current spot price is less than the maximum price you specify.
+ // Spot price will be updated based on available spot instance capacity and
+ // current spot instance requests. It may save you some costs. You can set
+ // this to auto for Packer to automatically discover the best spot price or
+ // to "0" to use an on demand instance (default).
+ SpotPrice string `mapstructure:"spot_price" required:"false"`
+ // Required if spot_price is set to
+ // auto. This tells Packer what sort of AMI you're launching to find the
+ // best spot price. This must be one of: Linux/UNIX, SUSE Linux,
+ // Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
+ // Windows (Amazon VPC)
+ SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product" required:"false"`
+ // Requires spot_price to be
+ // set. This tells Packer to apply tags to the spot request that is issued.
+ SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
+ // Filters used to populate the subnet_id field.
+ // Example:
+ SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
+ // If using VPC, the ID of the subnet, such as
+ // subnet-12345def, where Packer will launch the EC2 instance. This field is
+ // required if you are using an non-default VPC.
+ SubnetId string `mapstructure:"subnet_id" required:"false"`
+ // The name of the temporary key pair to
+ // generate. By default, Packer generates a name that looks like
+ // packer_&lt;UUID&gt;, where &lt;UUID&gt; is a 36 character unique identifier.
+ TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" required:"false"`
+ // A list of IPv4
+ // CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
+ TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
+ // User data to apply when launching the instance. Note
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // Filters used to populate the vpc_id field.
+ // vpc_id take precedence over this.
+ // Example:
+ VpcFilter VpcFilterOptions `mapstructure:"vpc_filter" required:"false"`
+ // If launching into a VPC subnet, Packer needs the VPC ID
+ // in order to create a temporary security group within the VPC. Requires
+ // subnet_id to be set. If this field is left blank, Packer will try to get
+ // the VPC ID from the subnet_id.
+ VpcId string `mapstructure:"vpc_id" required:"false"`
+ // The timeout for waiting for a Windows
+ // password for Windows instances. Defaults to 20 minutes. Example value:
+ // 10m
+ WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go
index 31fec0bb1..7047662cf 100644
--- a/builder/amazon/ebssurrogate/builder.go
+++ b/builder/amazon/ebssurrogate/builder.go
@@ -25,10 +25,14 @@ type Config struct {
awscommon.RunConfig `mapstructure:",squash"`
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
-
- RootDevice RootBlockDevice `mapstructure:"ami_root_device"`
+ // A block device mapping
+ // describing the root device of the AMI. This looks like the mappings in
+ // ami_block_device_mapping, except with an additional field:
+ RootDevice RootBlockDevice `mapstructure:"ami_root_device" required:"true"`
VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
- Architecture string `mapstructure:"ami_architecture"`
+ // what architecture to use when registering the
+ // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+ Architecture string `mapstructure:"ami_architecture" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/amazon/ebssurrogate/root_block_device.go b/builder/amazon/ebssurrogate/root_block_device.go
index 09c13ca00..543d15262 100644
--- a/builder/amazon/ebssurrogate/root_block_device.go
+++ b/builder/amazon/ebssurrogate/root_block_device.go
@@ -8,11 +8,29 @@ import (
type RootBlockDevice struct {
SourceDeviceName string `mapstructure:"source_device_name"`
- DeviceName string `mapstructure:"device_name"`
- DeleteOnTermination bool `mapstructure:"delete_on_termination"`
- IOPS int64 `mapstructure:"iops"`
- VolumeType string `mapstructure:"volume_type"`
- VolumeSize int64 `mapstructure:"volume_size"`
+ // The device name exposed to the instance (for
+ // example, /dev/sdh or xvdh). Required for every device in the block
+ // device mapping.
+ DeviceName string `mapstructure:"device_name" required:"false"`
+ // Indicates whether the EBS volume is
+ // deleted on instance termination. Default false. NOTE: If this
+ // value is not explicitly set to true and volumes are not cleaned up by
+ // an alternative method, additional volumes will accumulate after every
+ // build.
+ DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
+ // The number of I/O operations per second (IOPS) that
+ // the volume supports. See the documentation on
+ // IOPs
+ // for more information.
+ IOPS int64 `mapstructure:"iops" required:"false"`
+ // The volume type. gp2 for General Purpose
+ // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ // Magnetic volumes.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
+ // The size of the volume, in GiB. Required if
+ // not specifying a snapshot_id.
+ VolumeSize int64 `mapstructure:"volume_size" required:"false"`
}
func (c *RootBlockDevice) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/amazon/ebsvolume/block_device.go b/builder/amazon/ebsvolume/block_device.go
index 0d2b04613..ebaffff56 100644
--- a/builder/amazon/ebsvolume/block_device.go
+++ b/builder/amazon/ebsvolume/block_device.go
@@ -7,7 +7,10 @@ import (
type BlockDevice struct {
awscommon.BlockDevice `mapstructure:"-,squash"`
- Tags awscommon.TagMap `mapstructure:"tags"`
+ // Tags applied to the AMI. This is a
+ // template engine, see Build template
+ // data for more information.
+ Tags awscommon.TagMap `mapstructure:"tags" required:"false"`
}
func commonBlockDevices(mappings []BlockDevice, ctx *interpolate.Context) (awscommon.BlockDevices, error) {
diff --git a/builder/amazon/ebsvolume/builder.go b/builder/amazon/ebsvolume/builder.go
index c7d7b539c..66c0d05c1 100644
--- a/builder/amazon/ebsvolume/builder.go
+++ b/builder/amazon/ebsvolume/builder.go
@@ -22,10 +22,26 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
awscommon.AccessConfig `mapstructure:",squash"`
awscommon.RunConfig `mapstructure:",squash"`
-
- VolumeMappings []BlockDevice `mapstructure:"ebs_volumes"`
- AMIENASupport *bool `mapstructure:"ena_support"`
- AMISriovNetSupport bool `mapstructure:"sriov_support"`
+ // Add the block device
+ // mappings to the AMI. The block device mappings allow for keys:
+ VolumeMappings []BlockDevice `mapstructure:"ebs_volumes" required:"false"`
+ // Enable enhanced networking (ENA but not
+ // SriovNetSupport) on HVM-compatible AMIs. If set, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
+ // disable enhanced networking in the final AMI as opposed to passing the
+ // setting through unchanged from the source. Note: you must make sure
+ // enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
+ // Enable enhanced networking (SriovNetSupport but
+ // not ENA) on HVM-compatible AMIs. If true, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
+ // sure enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ // Default false.
+ AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
launchBlockDevices awscommon.BlockDevices
ctx interpolate.Context
diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index d15498694..6f64f8733 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -30,16 +30,44 @@ type Config struct {
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.RunConfig `mapstructure:",squash"`
-
- AccountId string `mapstructure:"account_id"`
- BundleDestination string `mapstructure:"bundle_destination"`
- BundlePrefix string `mapstructure:"bundle_prefix"`
- BundleUploadCommand string `mapstructure:"bundle_upload_command"`
- BundleVolCommand string `mapstructure:"bundle_vol_command"`
- S3Bucket string `mapstructure:"s3_bucket"`
- X509CertPath string `mapstructure:"x509_cert_path"`
- X509KeyPath string `mapstructure:"x509_key_path"`
- X509UploadPath string `mapstructure:"x509_upload_path"`
+ // Your AWS account ID. This is required for bundling
+ // the AMI. This is not the same as the access key. You can find your
+ // account ID in the security credentials page of your AWS account.
+ AccountId string `mapstructure:"account_id" required:"true"`
+ // The directory on the running instance where
+ // the bundled AMI will be saved prior to uploading. By default this is
+ // /tmp. This directory must exist and be writable.
+ BundleDestination string `mapstructure:"bundle_destination" required:"false"`
+ // The prefix for files created from bundling the
+ // root volume. By default this is image-{{timestamp}}. The timestamp
+ // variable should be used to make sure this is unique, otherwise it can
+ // collide with other created AMIs by Packer in your account.
+ BundlePrefix string `mapstructure:"bundle_prefix" required:"false"`
+ // The command to use to upload the bundled
+ // volume. See the "custom bundle commands" section below for more
+ // information.
+ BundleUploadCommand string `mapstructure:"bundle_upload_command" required:"false"`
+ // The command to use to bundle the volume.
+ // See the "custom bundle commands" section below for more information.
+ BundleVolCommand string `mapstructure:"bundle_vol_command" required:"false"`
+ // The name of the S3 bucket to upload the AMI. This
+ // bucket will be created if it doesn't exist.
+ S3Bucket string `mapstructure:"s3_bucket" required:"true"`
+ // The local path to a valid X509 certificate for
+ // your AWS account. This is used for bundling the AMI. This X509 certificate
+ // must be registered with your account from the security credentials page in
+ // the AWS console.
+ X509CertPath string `mapstructure:"x509_cert_path" required:"true"`
+ // The local path to the private key for the X509
+ // certificate specified by x509_cert_path. This is used for bundling the
+ // AMI.
+ X509KeyPath string `mapstructure:"x509_key_path" required:"true"`
+ // The path on the remote machine where the X509
+ // certificate will be uploaded. This path must already exist and be writable.
+ // X509 certificates are uploaded after provisioning is run, so it is
+ // perfectly okay to create this directory as part of the provisioning
+ // process. Defaults to /tmp.
+ X509UploadPath string `mapstructure:"x509_upload_path" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/azure/arm/clientconfig.go b/builder/azure/arm/clientconfig.go
index f178b8b50..4cfac9922 100644
--- a/builder/azure/arm/clientconfig.go
+++ b/builder/azure/arm/clientconfig.go
@@ -14,9 +14,10 @@ import (
// ClientConfig allows for various ways to authenticate Azure clients
type ClientConfig struct {
- // Describes where API's are
-
- CloudEnvironmentName string `mapstructure:"cloud_environment_name"`
+ // One of Public, China, Germany, or
+ // USGovernment. Defaults to Public. Long forms such as
+ // USGovernmentCloud and AzureUSGovernmentCloud are also supported.
+ CloudEnvironmentName string `mapstructure:"cloud_environment_name" required:"false"`
cloudEnvironment *azure.Environment
// Authentication fields
@@ -30,7 +31,10 @@ type ClientConfig struct {
// JWT bearer token for client auth (RFC 7523, Sec. 2.2)
ClientJWT string `mapstructure:"client_jwt"`
ObjectID string `mapstructure:"object_id"`
- TenantID string `mapstructure:"tenant_id"`
+ // The account identifier with which your client_id and
+ // subscription_id are associated. If not specified, tenant_id will be
+ // looked up using subscription_id.
+ TenantID string `mapstructure:"tenant_id" required:"false"`
SubscriptionID string `mapstructure:"subscription_id"`
}
diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go
index df0b7a4f6..aca143ff8 100644
--- a/builder/azure/arm/config.go
+++ b/builder/azure/arm/config.go
@@ -70,7 +70,12 @@ type SharedImageGallery struct {
ResourceGroup string `mapstructure:"resource_group"`
GalleryName string `mapstructure:"gallery_name"`
ImageName string `mapstructure:"image_name"`
- ImageVersion string `mapstructure:"image_version"`
+ // Specify a specific version of an OS to boot from.
+ // Defaults to latest. There may be a difference in versions available
+ // across regions due to image synchronization latency. To ensure a consistent
+ // version across regions set this value to one that is available in all
+ // regions where you are deploying.
+ ImageVersion string `mapstructure:"image_version" required:"false"`
}
type Config struct {
@@ -82,56 +87,147 @@ type Config struct {
// Capture
CaptureNamePrefix string `mapstructure:"capture_name_prefix"`
CaptureContainerName string `mapstructure:"capture_container_name"`
-
- // Shared Gallery
- SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery"`
-
- // Compute
- ImagePublisher string `mapstructure:"image_publisher"`
- ImageOffer string `mapstructure:"image_offer"`
- ImageSku string `mapstructure:"image_sku"`
- ImageVersion string `mapstructure:"image_version"`
- ImageUrl string `mapstructure:"image_url"`
-
- CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name"`
- CustomManagedImageName string `mapstructure:"custom_managed_image_name"`
+ // Use a Shared Gallery
+ // image
+ // as the source for this build. VHD targets are incompatible with this build
+ // type - the target must be a Managed Image.
+ SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery" required:"false"`
+ // PublisherName for your base image. See
+ // documentation
+ // for details.
+ ImagePublisher string `mapstructure:"image_publisher" required:"true"`
+ // Offer for your base image. See
+ // documentation
+ // for details.
+ ImageOffer string `mapstructure:"image_offer" required:"true"`
+ // SKU for your base image. See
+ // documentation
+ // for details.
+ ImageSku string `mapstructure:"image_sku" required:"true"`
+ // Specify a specific version of an OS to boot from.
+ // Defaults to latest. There may be a difference in versions available
+ // across regions due to image synchronization latency. To ensure a consistent
+ // version across regions set this value to one that is available in all
+ // regions where you are deploying.
+ ImageVersion string `mapstructure:"image_version" required:"false"`
+ // Specify a custom VHD to use. If this value is set, do
+ // not set image_publisher, image_offer, image_sku, or image_version.
+ ImageUrl string `mapstructure:"image_url" required:"false"`
+ // Specify the source
+ // managed image's resource group to use. If this value is set, do not
+ // set image_publisher, image_offer, image_sku, or image_version. If this
+ // value is set, the value custom_managed_image_name must also be set. See
+ // documentation
+ // to learn more about managed images.
+ CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"false"`
+ // Specify the source managed image's
+ // name to use. If this value is set, do not set image_publisher,
+ // image_offer, image_sku, or image_version. If this value is set, the
+ // value custom_managed_image_resource_group_name must also be set. See
+ // documentation
+ // to learn more about managed images.
+ CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"false"`
customManagedImageID string
Location string `mapstructure:"location"`
- VMSize string `mapstructure:"vm_size"`
+ // Size of the VM used for building. This can be changed
+ // when you deploy a VM from your VHD. See
+ // pricing
+ // information. Defaults to Standard_A1.
+ VMSize string `mapstructure:"vm_size" required:"false"`
ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
ManagedImageName string `mapstructure:"managed_image_name"`
- ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type"`
+ // Specify the storage account
+ // type for a managed image. Valid values are Standard_LRS and Premium_LRS.
+ // The default is Standard_LRS.
+ ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type" required:"false"`
managedImageStorageAccountType compute.StorageAccountTypes
- ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name"`
- ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix"`
+ // If
+ // managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
+ // is created with the same name as this value before the VM is captured.
+ ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name" required:"false"`
+ // If
+ // managed_image_data_disk_snapshot_prefix is set, snapshot of the data
+ // disk(s) is created with the same prefix as this value before the VM is
+ // captured.
+ ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix" required:"false"`
manageImageLocation string
- ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient"`
-
- // Deployment
- AzureTags map[string]*string `mapstructure:"azure_tags"`
+ // Store the image in zone-resilient storage. You need to create it
+ // in a region that supports availability zones.
+ ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
+ // The user can define up to 15
+ // tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
+ // 256 characters. Tags are applied to every resource deployed by a Packer
+ // build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
+ AzureTags map[string]*string `mapstructure:"azure_tags" required:"false"`
ResourceGroupName string `mapstructure:"resource_group_name"`
StorageAccount string `mapstructure:"storage_account"`
- TempComputeName string `mapstructure:"temp_compute_name"`
+ // temporary name assigned to the VM. If this
+ // value is not set, a random value will be assigned. Knowing the resource
+ // group and VM name allows one to execute commands to update the VM during a
+ // Packer build, e.g. attach a resource disk to the VM.
+ TempComputeName string `mapstructure:"temp_compute_name" required:"false"`
TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
BuildResourceGroupName string `mapstructure:"build_resource_group_name"`
storageAccountBlobEndpoint string
- PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip"`
- VirtualNetworkName string `mapstructure:"virtual_network_name"`
- VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name"`
- VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name"`
- CustomDataFile string `mapstructure:"custom_data_file"`
+ // This value allows you to
+ // set a virtual_network_name and obtain a public IP. If this value is not
+ // set and virtual_network_name is defined Packer is only allowed to be
+ // executed from a host on the same subnet / virtual network.
+ PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip" required:"false"`
+ // Use a pre-existing virtual network for the
+ // VM. This option enables private communication with the VM, no public IP
+ // address is used or provisioned (unless you set
+ // private_virtual_network_with_public_ip).
+ VirtualNetworkName string `mapstructure:"virtual_network_name" required:"false"`
+ // If virtual_network_name is set,
+ // this value may also be set. If virtual_network_name is set, and this
+ // value is not set the builder attempts to determine the subnet to use with
+ // the virtual network. If the subnet cannot be found, or it cannot be
+ // disambiguated, this value should be set.
+ VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name" required:"false"`
+ // If virtual_network_name is
+ // set, this value may also be set. If virtual_network_name is set, and
+ // this value is not set the builder attempts to determine the resource group
+ // containing the virtual network. If the resource group cannot be found, or
+ // it cannot be disambiguated, this value should be set.
+ VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
+ // Specify a file containing custom data to inject
+ // into the cloud-init process. The contents of the file are read and injected
+ // into the ARM template. The custom data will be passed to cloud-init for
+ // processing at the time of provisioning. See
+ // documentation
+ // to learn more about custom data, and how it can be used to influence the
+ // provisioning process.
+ CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
customData string
- PlanInfo PlanInformation `mapstructure:"plan_info"`
-
- // OS
- OSType string `mapstructure:"os_type"`
- OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"`
-
- // Additional Disks
- AdditionalDiskSize []int32 `mapstructure:"disk_additional_size"`
- DiskCachingType string `mapstructure:"disk_caching_type"`
+ // Used for creating images from Marketplace images.
+ // Please refer to Deploy an image with Marketplace
+ // terms for more details. Not
+ // all Marketplace images support programmatic deployment, and support is
+ // controlled by the image publisher.
+ PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
+ // If either Linux or Windows is specified Packer will
+ // automatically configure authentication credentials for the provisioned
+ // machine. For Linux this configures an SSH authorized key. For Windows
+ // this configures a WinRM certificate.
+ OSType string `mapstructure:"os_type" required:"false"`
+ // Specify the size of the OS disk in GB
+ // (gigabytes). Values of zero or less than zero are ignored.
+ OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
+ // The size(s) of any additional
+ // hard disks for the VM in gigabytes. If this is not specified then the VM
+ // will only contain an OS disk. The number of additional disks and maximum
+ // size of a disk depends on the configuration of your VM. See
+ // Windows
+ // or
+ // Linux
+ // for more information.
+ AdditionalDiskSize []int32 `mapstructure:"disk_additional_size" required:"false"`
+ // Specify the disk caching type. Valid values
+ // are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
+ DiskCachingType string `mapstructure:"disk_caching_type" required:"false"`
diskCachingType compute.CachingTypes
// Runtime Values
@@ -158,9 +254,11 @@ type Config struct {
Comm communicator.Config `mapstructure:",squash"`
ctx interpolate.Context
-
- //Cleanup
- AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete"`
+ // If you want Packer to delete the
+ // temporary resource group asynchronously set this value. It's a boolean
+ // value and defaults to false. Important: Setting this to true means that
+ // your builds are faster, however any failed deletes are not reported.
+ AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete" required:"false"`
}
type keyVaultCertificate struct {
diff --git a/builder/cloudstack/config.go b/builder/cloudstack/config.go
index 00f79618b..94f76347c 100644
--- a/builder/cloudstack/config.go
+++ b/builder/cloudstack/config.go
@@ -19,44 +19,130 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
common.HTTPConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
-
- APIURL string `mapstructure:"api_url"`
- APIKey string `mapstructure:"api_key"`
- SecretKey string `mapstructure:"secret_key"`
- AsyncTimeout time.Duration `mapstructure:"async_timeout"`
- HTTPGetOnly bool `mapstructure:"http_get_only"`
- SSLNoVerify bool `mapstructure:"ssl_no_verify"`
-
- CIDRList []string `mapstructure:"cidr_list"`
- CreateSecurityGroup bool `mapstructure:"create_security_group"`
- DiskOffering string `mapstructure:"disk_offering"`
- DiskSize int64 `mapstructure:"disk_size"`
- Expunge bool `mapstructure:"expunge"`
- Hypervisor string `mapstructure:"hypervisor"`
- InstanceName string `mapstructure:"instance_name"`
- Network string `mapstructure:"network"`
- Project string `mapstructure:"project"`
- PublicIPAddress string `mapstructure:"public_ip_address"`
- PublicPort int `mapstructure:"public_port"`
- SecurityGroups []string `mapstructure:"security_groups"`
- ServiceOffering string `mapstructure:"service_offering"`
- PreventFirewallChanges bool `mapstructure:"prevent_firewall_changes"`
- SourceISO string `mapstructure:"source_iso"`
- SourceTemplate string `mapstructure:"source_template"`
- TemporaryKeypairName string `mapstructure:"temporary_keypair_name"`
- UseLocalIPAddress bool `mapstructure:"use_local_ip_address"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- Zone string `mapstructure:"zone"`
-
- TemplateName string `mapstructure:"template_name"`
- TemplateDisplayText string `mapstructure:"template_display_text"`
- TemplateOS string `mapstructure:"template_os"`
- TemplateFeatured bool `mapstructure:"template_featured"`
- TemplatePublic bool `mapstructure:"template_public"`
- TemplatePasswordEnabled bool `mapstructure:"template_password_enabled"`
- TemplateRequiresHVM bool `mapstructure:"template_requires_hvm"`
- TemplateScalable bool `mapstructure:"template_scalable"`
+ // The CloudStack API endpoint we will connect to. It can
+ // also be specified via environment variable CLOUDSTACK_API_URL, if set.
+ APIURL string `mapstructure:"api_url" required:"true"`
+ // The API key used to sign all API requests. It can also
+ // be specified via environment variable CLOUDSTACK_API_KEY, if set.
+ APIKey string `mapstructure:"api_key" required:"true"`
+ // The secret key used to sign all API requests. It
+ // can also be specified via environment variable CLOUDSTACK_SECRET_KEY, if
+ // set.
+ SecretKey string `mapstructure:"secret_key" required:"true"`
+ // The time duration to wait for async calls to
+ // finish. Defaults to 30m.
+ AsyncTimeout time.Duration `mapstructure:"async_timeout" required:"false"`
+ // Some cloud providers only allow HTTP GET calls
+ // to their CloudStack API. If using such a provider, you need to set this to
+ // true in order for the provider to only make GET calls and no POST calls.
+ HTTPGetOnly bool `mapstructure:"http_get_only" required:"false"`
+ // Set to true to skip SSL verification.
+ // Defaults to false.
+ SSLNoVerify bool `mapstructure:"ssl_no_verify" required:"false"`
+ // List of CIDR's that will have access to the new
+ // instance. This is needed in order for any provisioners to be able to
+ // connect to the instance. Defaults to [ "0.0.0.0/0" ]. Only required when
+ // use_local_ip_address is false.
+ CIDRList []string `mapstructure:"cidr_list" required:"false"`
+ // If true a temporary security group
+ // will be created which allows traffic towards the instance from the
+ // cidr_list. This option will be ignored if security_groups is also
+ // defined. Requires expunge set to true. Defaults to false.
+ CreateSecurityGroup bool `mapstructure:"create_security_group" required:"false"`
+ // The name or ID of the disk offering used for the
+ // instance. This option is only available (and also required) when using
+ // source_iso.
+ DiskOffering string `mapstructure:"disk_offering" required:"false"`
+ // The size (in GB) of the root disk of the new
+ // instance. This option is only available when using source_template.
+ DiskSize int64 `mapstructure:"disk_size" required:"false"`
+ // Set to true to expunge the instance when it is
+ // destroyed. Defaults to false.
+ Expunge bool `mapstructure:"expunge" required:"false"`
+ // The target hypervisor (e.g. XenServer, KVM) for
+ // the new template. This option is required when using source_iso.
+ Hypervisor string `mapstructure:"hypervisor" required:"false"`
+ // The name of the instance. Defaults to
+ // "packer-UUID" where UUID is dynamically generated.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // The name or ID of the network to connect the instance
+ // to.
+ Network string `mapstructure:"network" required:"true"`
+ // The name or ID of the project to deploy the instance
+ // to.
+ Project string `mapstructure:"project" required:"false"`
+ // The public IP address or its ID used for
+ // connecting any provisioners to. If not provided, a temporary public IP
+ // address will be associated and released during the Packer run.
+ PublicIPAddress string `mapstructure:"public_ip_address" required:"false"`
+ // The fixed port you want to configure in the port
+ // forwarding rule. Set this attribute if you do not want to use a random
+ // public port.
+ PublicPort int `mapstructure:"public_port" required:"false"`
+ // A list of security group IDs or
+ // names to associate the instance with.
+ SecurityGroups []string `mapstructure:"security_groups" required:"false"`
+ // The name or ID of the service offering used
+ // for the instance.
+ ServiceOffering string `mapstructure:"service_offering" required:"true"`
+ // Set to true to prevent network
+ // ACLs or firewall rules creation. Defaults to false.
+ PreventFirewallChanges bool `mapstructure:"prevent_firewall_changes" required:"false"`
+ // The name or ID of an ISO that will be mounted
+ // before booting the instance. This option is mutually exclusive with
+ // source_template. When using source_iso, both disk_offering and
+ // hypervisor are required.
+ SourceISO string `mapstructure:"source_iso" required:"true"`
+ // The name or ID of the template used as base
+ // template for the instance. This option is mutually exclusive with
+ // source_iso.
+ SourceTemplate string `mapstructure:"source_template" required:"true"`
+ // The name of the temporary SSH key pair
+ // to generate. By default, Packer generates a name that looks like
+ // packer_<UUID>, where <UUID> is a 36 character unique identifier.
+ TemporaryKeypairName string `mapstructure:"temporary_keypair_name" required:"false"`
+ // Set to true to indicate that the
+ // provisioners should connect to the local IP address of the instance.
+ UseLocalIPAddress bool `mapstructure:"use_local_ip_address" required:"false"`
+ // User data to launch with the instance. This is a
+ // template engine; see User Data below for
+ // more details. Packer will not automatically wait for a user script to
+ // finish before shutting down the instance; this must be handled in a
+ // provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the instance. This file will be parsed as a template
+ // engine; see User Data below for more
+ // details.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // The name or ID of the zone where the instance will be
+ // created.
+ Zone string `mapstructure:"zone" required:"true"`
+ // The name of the new template. Defaults to
+ // "packer-{{timestamp}}" where timestamp will be the current time.
+ TemplateName string `mapstructure:"template_name" required:"false"`
+ // The display text of the new template.
+ // Defaults to the template_name.
+ TemplateDisplayText string `mapstructure:"template_display_text" required:"false"`
+ // The name or ID of the template OS for the new
+ // template that will be created.
+ TemplateOS string `mapstructure:"template_os" required:"true"`
+ // Set to true to indicate that the template
+ // is featured. Defaults to false.
+ TemplateFeatured bool `mapstructure:"template_featured" required:"false"`
+ // Set to true to indicate that the template
+ // is available for all accounts. Defaults to false.
+ TemplatePublic bool `mapstructure:"template_public" required:"false"`
+ // Set to true to indicate the
+ // template should be password enabled. Defaults to false.
+ TemplatePasswordEnabled bool `mapstructure:"template_password_enabled" required:"false"`
+ // Set to true to indicate the template
+ // requires hardware-assisted virtualization. Defaults to false.
+ TemplateRequiresHVM bool `mapstructure:"template_requires_hvm" required:"false"`
+ // Set to true to indicate that the template
+ // contains tools to support dynamic scaling of VM cpu/memory. Defaults to
+ // false.
+ TemplateScalable bool `mapstructure:"template_scalable" required:"false"`
TemplateTag string `mapstructure:"template_tag"`
Tags map[string]string `mapstructure:"tags"`
diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go
index 451852829..ff654082c 100644
--- a/builder/digitalocean/config.go
+++ b/builder/digitalocean/config.go
@@ -19,24 +19,61 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
-
- APIToken string `mapstructure:"api_token"`
- APIURL string `mapstructure:"api_url"`
-
- Region string `mapstructure:"region"`
- Size string `mapstructure:"size"`
- Image string `mapstructure:"image"`
-
- PrivateNetworking bool `mapstructure:"private_networking"`
- Monitoring bool `mapstructure:"monitoring"`
- IPv6 bool `mapstructure:"ipv6"`
- SnapshotName string `mapstructure:"snapshot_name"`
- SnapshotRegions []string `mapstructure:"snapshot_regions"`
- StateTimeout time.Duration `mapstructure:"state_timeout"`
- DropletName string `mapstructure:"droplet_name"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- Tags []string `mapstructure:"tags"`
+ // The client TOKEN to use to access your account. It
+ // can also be specified via environment variable DIGITALOCEAN_API_TOKEN, if
+ // set.
+ APIToken string `mapstructure:"api_token" required:"true"`
+ // Non standard api endpoint URL. Set this if you are
+ // using a DigitalOcean API compatible service. It can also be specified via
+ // environment variable DIGITALOCEAN_API_URL.
+ APIURL string `mapstructure:"api_url" required:"false"`
+ // The name (or slug) of the region to launch the droplet
+ // in. Consequently, this is the region where the snapshot will be available.
+ // See
+ // https://developers.digitalocean.com/documentation/v2/#list-all-regions
+ // for the accepted region names/slugs.
+ Region string `mapstructure:"region" required:"true"`
+ // The name (or slug) of the droplet size to use. See
+ // https://developers.digitalocean.com/documentation/v2/#list-all-sizes
+ // for the accepted size names/slugs.
+ Size string `mapstructure:"size" required:"true"`
+ // The name (or slug) of the base image to use. This is the
+ // image that will be used to launch a new droplet and provision it. See
+ // https://developers.digitalocean.com/documentation/v2/#list-all-images
+ // for details on how to get a list of the accepted image names/slugs.
+ Image string `mapstructure:"image" required:"true"`
+ // Set to true to enable private networking
+ // for the droplet being created. This defaults to false, or not enabled.
+ PrivateNetworking bool `mapstructure:"private_networking" required:"false"`
+ // Set to true to enable monitoring for the droplet
+ // being created. This defaults to false, or not enabled.
+ Monitoring bool `mapstructure:"monitoring" required:"false"`
+ // Set to true to enable ipv6 for the droplet being
+ // created. This defaults to false, or not enabled.
+ IPv6 bool `mapstructure:"ipv6" required:"false"`
+ // The name of the resulting snapshot that will
+ // appear in your account. Defaults to "packer-{{timestamp}}" (see
+ // configuration templates for more info).
+ SnapshotName string `mapstructure:"snapshot_name" required:"false"`
+ // The regions of the resulting
+ // snapshot that will appear in your account.
+ SnapshotRegions []string `mapstructure:"snapshot_regions" required:"false"`
+ // The time to wait, as a duration string, for a
+ // droplet to enter a desired state (such as "active") before timing out. The
+ // default state timeout is "6m".
+ StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
+ // The name assigned to the droplet. DigitalOcean
+ // sets the hostname of the machine to this value.
+ DropletName string `mapstructure:"droplet_name" required:"false"`
+ // User data to launch with the Droplet. Packer will
+	// not automatically wait for a user script to finish before shutting down the
+	// instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the Droplet.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // Tags to apply to the droplet when it is created
+ Tags []string `mapstructure:"tags" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/docker/config.go b/builder/docker/config.go
index 6f2dba809..c69c5c7f4 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -27,27 +27,66 @@ type Config struct {
Author string
Changes []string
Commit bool
- ContainerDir string `mapstructure:"container_dir"`
+	// The directory inside the container to mount the temp
+	// directory from the host server into, for the file
+	// provisioner to work with. This defaults to
+	// c:/packer-files on windows and /packer-files on other systems.
+ ContainerDir string `mapstructure:"container_dir" required:"false"`
Discard bool
- ExecUser string `mapstructure:"exec_user"`
+ // Username (UID) to run remote commands with. You can
+ // also set the group name/ID if you want: (UID or UID:GID).
+ // You may need this if you get permission errors trying to run the shell or
+ // other provisioners.
+ ExecUser string `mapstructure:"exec_user" required:"false"`
ExportPath string `mapstructure:"export_path"`
Image string
Message string
- Privileged bool `mapstructure:"privileged"`
+ // If true, run the docker container with the
+ // --privileged flag. This defaults to false if not set.
+ Privileged bool `mapstructure:"privileged" required:"false"`
Pty bool
Pull bool
- RunCommand []string `mapstructure:"run_command"`
+ // An array of arguments to pass to
+ // docker run in order to run the container. By default this is set to
+ // ["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are
+ // using a linux container, and
+ // ["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"] if you
+ // are running a windows container. {{.Image}} is a template variable that
+ // corresponds to the image template option. Passing the entrypoint option
+ // this way will make it the default entrypoint of the resulting image, so
+ // running docker run -it --rm will start the docker image from the
+ // /bin/sh shell interpreter; you could run a script or another shell by
+ // running docker run -it --rm -c /bin/bash. If your docker image
+ // embeds a binary intended to be run often, you should consider changing the
+ // default entrypoint to point to it.
+ RunCommand []string `mapstructure:"run_command" required:"false"`
Volumes map[string]string
- FixUploadOwner bool `mapstructure:"fix_upload_owner"`
- WindowsContainer bool `mapstructure:"windows_container"`
+ // If true, files uploaded to the container
+ // will be owned by the user the container is running as. If false, the owner
+ // will depend on the version of docker installed in the system. Defaults to
+ // true.
+ FixUploadOwner bool `mapstructure:"fix_upload_owner" required:"false"`
+ // If "true", tells Packer that you are building a
+ // Windows container running on a windows host. This is necessary for building
+ // Windows containers, because our normal docker bindings do not work for them.
+ WindowsContainer bool `mapstructure:"windows_container" required:"false"`
// This is used to login to dockerhub to pull a private base container. For
// pushing to dockerhub, see the docker post-processors
Login bool
- LoginPassword string `mapstructure:"login_password"`
- LoginServer string `mapstructure:"login_server"`
- LoginUsername string `mapstructure:"login_username"`
- EcrLogin bool `mapstructure:"ecr_login"`
+ // The password to use to authenticate to login.
+ LoginPassword string `mapstructure:"login_password" required:"false"`
+ // The server address to login to.
+ LoginServer string `mapstructure:"login_server" required:"false"`
+ // The username to use to authenticate to login.
+ LoginUsername string `mapstructure:"login_username" required:"false"`
+ // Defaults to false. If true, the builder will login
+ // in order to pull the image from Amazon EC2 Container Registry
+ // (ECR). The builder only logs in for the
+	// duration of the pull. If true, login_server is required and login,
+ // login_username, and login_password will be ignored. For more
+ // information see the section on ECR.
+ EcrLogin bool `mapstructure:"ecr_login" required:"false"`
AwsAccessConfig `mapstructure:",squash"`
ctx interpolate.Context
diff --git a/builder/docker/ecr_login.go b/builder/docker/ecr_login.go
index c605bc36e..5daed00cd 100644
--- a/builder/docker/ecr_login.go
+++ b/builder/docker/ecr_login.go
@@ -13,10 +13,23 @@ import (
)
type AwsAccessConfig struct {
- AccessKey string `mapstructure:"aws_access_key"`
- SecretKey string `mapstructure:"aws_secret_key"`
- Token string `mapstructure:"aws_token"`
- Profile string `mapstructure:"aws_profile"`
+ // The AWS access key used to communicate with
+ // AWS. Learn how to set
+ // this.
+ AccessKey string `mapstructure:"aws_access_key" required:"false"`
+ // The AWS secret key used to communicate with
+ // AWS. Learn how to set
+ // this.
+ SecretKey string `mapstructure:"aws_secret_key" required:"false"`
+ // The AWS access token to use. This is different from
+ // the access key and secret key. If you're not sure what this is, then you
+ // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ // environmental variable.
+ Token string `mapstructure:"aws_token" required:"false"`
+ // The AWS shared credentials profile used to
+ // communicate with AWS. Learn how to set
+ // this.
+ Profile string `mapstructure:"aws_profile" required:"false"`
cfg *common.AccessConfig
}
diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go
index df55d99d2..d5edc42aa 100644
--- a/builder/googlecompute/config.go
+++ b/builder/googlecompute/config.go
@@ -24,45 +24,138 @@ var reImageFamily = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$`)
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
-
- AccountFile string `mapstructure:"account_file"`
- ProjectId string `mapstructure:"project_id"`
-
- AcceleratorType string `mapstructure:"accelerator_type"`
- AcceleratorCount int64 `mapstructure:"accelerator_count"`
- Address string `mapstructure:"address"`
- DisableDefaultServiceAccount bool `mapstructure:"disable_default_service_account"`
- DiskName string `mapstructure:"disk_name"`
- DiskSizeGb int64 `mapstructure:"disk_size"`
- DiskType string `mapstructure:"disk_type"`
- ImageName string `mapstructure:"image_name"`
- ImageDescription string `mapstructure:"image_description"`
- ImageEncryptionKey *compute.CustomerEncryptionKey `mapstructure:"image_encryption_key"`
- ImageFamily string `mapstructure:"image_family"`
- ImageLabels map[string]string `mapstructure:"image_labels"`
- ImageLicenses []string `mapstructure:"image_licenses"`
- InstanceName string `mapstructure:"instance_name"`
- Labels map[string]string `mapstructure:"labels"`
- MachineType string `mapstructure:"machine_type"`
- Metadata map[string]string `mapstructure:"metadata"`
- MinCpuPlatform string `mapstructure:"min_cpu_platform"`
- Network string `mapstructure:"network"`
- NetworkProjectId string `mapstructure:"network_project_id"`
- OmitExternalIP bool `mapstructure:"omit_external_ip"`
- OnHostMaintenance string `mapstructure:"on_host_maintenance"`
- Preemptible bool `mapstructure:"preemptible"`
- RawStateTimeout string `mapstructure:"state_timeout"`
- Region string `mapstructure:"region"`
- Scopes []string `mapstructure:"scopes"`
- ServiceAccountEmail string `mapstructure:"service_account_email"`
- SourceImage string `mapstructure:"source_image"`
- SourceImageFamily string `mapstructure:"source_image_family"`
- SourceImageProjectId string `mapstructure:"source_image_project_id"`
- StartupScriptFile string `mapstructure:"startup_script_file"`
- Subnetwork string `mapstructure:"subnetwork"`
- Tags []string `mapstructure:"tags"`
- UseInternalIP bool `mapstructure:"use_internal_ip"`
- Zone string `mapstructure:"zone"`
+ // The JSON file containing your account
+ // credentials. Not required if you run Packer on a GCE instance with a
+ // service account. Instructions for creating the file or using service
+ // accounts are above.
+ AccountFile string `mapstructure:"account_file" required:"false"`
+ // The project ID that will be used to launch
+ // instances and store images.
+ ProjectId string `mapstructure:"project_id" required:"true"`
+ // Full or partial URL of the guest accelerator
+ // type. GPU accelerators can only be used with
+ // "on_host_maintenance": "TERMINATE" option set. Example:
+ // "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
+ AcceleratorType string `mapstructure:"accelerator_type" required:"false"`
+ // Number of guest accelerator cards to add to
+ // the launched instance.
+ AcceleratorCount int64 `mapstructure:"accelerator_count" required:"false"`
+ // The name of a pre-allocated static external IP
+ // address. Note, must be the name and not the actual IP address.
+ Address string `mapstructure:"address" required:"false"`
+ // If true, the default service
+ // account will not be used if service_account_email is not specified. Set
+ // this value to true and omit service_account_email to provision a VM with
+ // no service account.
+ DisableDefaultServiceAccount bool `mapstructure:"disable_default_service_account" required:"false"`
+ // The name of the disk, if unset the instance name
+ // will be used.
+ DiskName string `mapstructure:"disk_name" required:"false"`
+ // The size of the disk in GB. This defaults to 10,
+ // which is 10GB.
+ DiskSizeGb int64 `mapstructure:"disk_size" required:"false"`
+ // Type of disk used to back your instance, like
+ // pd-ssd or pd-standard. Defaults to pd-standard.
+ DiskType string `mapstructure:"disk_type" required:"false"`
+ // The unique name of the resulting image. Defaults to
+ // "packer-{{timestamp}}".
+ ImageName string `mapstructure:"image_name" required:"false"`
+ // The description of the resulting image.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
+	// Image encryption key to apply to the created image (see the API docs for possible values).
+ ImageEncryptionKey *compute.CustomerEncryptionKey `mapstructure:"image_encryption_key" required:"false"`
+ // The name of the image family to which the
+ // resulting image belongs. You can create disks by specifying an image family
+ // instead of a specific image name. The image family always returns its
+ // latest image that is not deprecated.
+ ImageFamily string `mapstructure:"image_family" required:"false"`
+ // Key/value pair labels to
+ // apply to the created image.
+ ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
+ // Licenses to apply to the created
+ // image.
+ ImageLicenses []string `mapstructure:"image_licenses" required:"false"`
+ // A name to give the launched instance. Beware
+ // that this must be unique. Defaults to "packer-{{uuid}}".
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // Key/value pair labels to apply to
+ // the launched instance.
+ Labels map[string]string `mapstructure:"labels" required:"false"`
+ // The machine type. Defaults to "n1-standard-1".
+ MachineType string `mapstructure:"machine_type" required:"false"`
+ // Metadata applied to the launched
+ // instance.
+ Metadata map[string]string `mapstructure:"metadata" required:"false"`
+ // A Minimum CPU Platform for VM Instance.
+ // Availability and default CPU platforms vary across zones, based on the
+ // hardware available in each GCP zone.
+ // Details
+ MinCpuPlatform string `mapstructure:"min_cpu_platform" required:"false"`
+ // The Google Compute network id or URL to use for the
+ // launched instance. Defaults to "default". If the value is not a URL, it
+ // will be interpolated to
+ // projects/((network_project_id))/global/networks/((network)). This value
+ // is not required if a subnet is specified.
+ Network string `mapstructure:"network" required:"false"`
+ // The project ID for the network and
+ // subnetwork to use for launched instance. Defaults to project_id.
+ NetworkProjectId string `mapstructure:"network_project_id" required:"false"`
+ // If true, the instance will not have an
+ // external IP. use_internal_ip must be true if this property is true.
+ OmitExternalIP bool `mapstructure:"omit_external_ip" required:"false"`
+ // Sets Host Maintenance Option. Valid
+ // choices are MIGRATE and TERMINATE. Please see GCE Instance Scheduling
+ // Options,
+ // as not all machine_types support MIGRATE (i.e. machines with GPUs). If
+ // preemptible is true this can only be TERMINATE. If preemptible is false,
+	// it defaults to MIGRATE.
+ OnHostMaintenance string `mapstructure:"on_host_maintenance" required:"false"`
+ // If true, launch a preemptible instance.
+ Preemptible bool `mapstructure:"preemptible" required:"false"`
+ // The time to wait for instance state changes.
+ // Defaults to "5m".
+ RawStateTimeout string `mapstructure:"state_timeout" required:"false"`
+ // The region in which to launch the instance. Defaults to
+ // the region hosting the specified zone.
+ Region string `mapstructure:"region" required:"false"`
+ // The service account scopes for launched
+ // instance. Defaults to:
+ Scopes []string `mapstructure:"scopes" required:"false"`
+ // The service account to be used for
+ // launched instance. Defaults to the project's default service account unless
+ // disable_default_service_account is true.
+ ServiceAccountEmail string `mapstructure:"service_account_email" required:"false"`
+ // The source image to use to create the new image
+ // from. You can also specify source_image_family instead. If both
+ // source_image and source_image_family are specified, source_image
+ // takes precedence. Example: "debian-8-jessie-v20161027"
+ SourceImage string `mapstructure:"source_image" required:"true"`
+ // The source image family to use to create
+ // the new image from. The image family always returns its latest image that
+ // is not deprecated. Example: "debian-8".
+ SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
+ // The project ID of the project
+ // containing the source image.
+ SourceImageProjectId string `mapstructure:"source_image_project_id" required:"false"`
+ // The path to a startup script to run on the
+ // VM from which the image will be made.
+ StartupScriptFile string `mapstructure:"startup_script_file" required:"false"`
+ // The Google Compute subnetwork id or URL to use for
+ // the launched instance. Only required if the network has been created with
+ // custom subnetting. Note, the region of the subnetwork must match the
+ // region or zone in which the VM is launched. If the value is not a URL,
+ // it will be interpolated to
+ // projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))
+ Subnetwork string `mapstructure:"subnetwork" required:"false"`
+ // Assign network tags to apply firewall rules to
+ // VM instance.
+ Tags []string `mapstructure:"tags" required:"false"`
+ // If true, use the instance's internal IP
+ // instead of its external IP during building.
+ UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
+ // The zone in which to launch the instance used to create
+ // the image. Example: "us-central1-a"
+ Zone string `mapstructure:"zone" required:"true"`
Account AccountFile
stateTimeout time.Duration
diff --git a/builder/hyperone/config.go b/builder/hyperone/config.go
index f4da4df41..d8adc8f02 100644
--- a/builder/hyperone/config.go
+++ b/builder/hyperone/config.go
@@ -32,32 +32,61 @@ const (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
-
- APIURL string `mapstructure:"api_url"`
- Token string `mapstructure:"token"`
- Project string `mapstructure:"project"`
- TokenLogin string `mapstructure:"token_login"`
-
- StateTimeout time.Duration `mapstructure:"state_timeout"`
-
- SourceImage string `mapstructure:"source_image"`
- ImageName string `mapstructure:"image_name"`
- ImageDescription string `mapstructure:"image_description"`
- ImageTags map[string]interface{} `mapstructure:"image_tags"`
- ImageService string `mapstructure:"image_service"`
-
- VmType string `mapstructure:"vm_type"`
- VmName string `mapstructure:"vm_name"`
- VmTags map[string]interface{} `mapstructure:"vm_tags"`
-
- DiskName string `mapstructure:"disk_name"`
- DiskType string `mapstructure:"disk_type"`
- DiskSize float32 `mapstructure:"disk_size"`
-
- Network string `mapstructure:"network"`
- PrivateIP string `mapstructure:"private_ip"`
- PublicIP string `mapstructure:"public_ip"`
- PublicNetAdpService string `mapstructure:"public_netadp_service"`
+ // Custom API endpoint URL, compatible with HyperOne.
+ // It can also be specified via environment variable HYPERONE_API_URL.
+ APIURL string `mapstructure:"api_url" required:"false"`
+ // The authentication token used to access your account.
+ // This can be either a session token or a service account token.
+ // If not defined, the builder will attempt to find it in the following order:
+ Token string `mapstructure:"token" required:"true"`
+ // The id or name of the project. This field is required
+ // only if using session tokens. It should be skipped when using service
+ // account authentication.
+ Project string `mapstructure:"project" required:"true"`
+ // Login (an e-mail) on HyperOne platform. Set this
+ // if you want to fetch the token by SSH authentication.
+ TokenLogin string `mapstructure:"token_login" required:"false"`
+ // Timeout for waiting on the API to complete
+ // a request. Defaults to 5m.
+ StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
+ // ID or name of the image to launch server from.
+ SourceImage string `mapstructure:"source_image" required:"true"`
+ // The name of the resulting image. Defaults to
+ // "packer-{{timestamp}}"
+ // (see configuration templates for more info).
+ ImageName string `mapstructure:"image_name" required:"false"`
+ // The description of the resulting image.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
+ // Key/value pair tags to
+ // add to the created image.
+ ImageTags map[string]interface{} `mapstructure:"image_tags" required:"false"`
+ // The service of the resulting image.
+ ImageService string `mapstructure:"image_service" required:"false"`
+ // ID or name of the type this server should be created with.
+ VmType string `mapstructure:"vm_type" required:"true"`
+ // The name of the created server.
+ VmName string `mapstructure:"vm_name" required:"false"`
+ // Key/value pair tags to
+ // add to the created server.
+ VmTags map[string]interface{} `mapstructure:"vm_tags" required:"false"`
+ // The name of the created disk.
+ DiskName string `mapstructure:"disk_name" required:"false"`
+ // The type of the created disk. Defaults to ssd.
+ DiskType string `mapstructure:"disk_type" required:"false"`
+ // Size of the created disk, in GiB.
+ DiskSize float32 `mapstructure:"disk_size" required:"true"`
+ // The ID of the network to attach to the created server.
+ Network string `mapstructure:"network" required:"false"`
+ // The ID of the private IP within chosen network
+ // that should be assigned to the created server.
+ PrivateIP string `mapstructure:"private_ip" required:"false"`
+ // The ID of the public IP that should be assigned to
+ // the created server. If network is chosen, the public IP will be associated
+ // with server's private IP.
+ PublicIP string `mapstructure:"public_ip" required:"false"`
+ // Custom service of public network adapter.
+ // Can be useful when using custom api_url. Defaults to public.
+ PublicNetAdpService string `mapstructure:"public_netadp_service" required:"false"`
ChrootDisk bool `mapstructure:"chroot_disk"`
ChrootDiskSize float32 `mapstructure:"chroot_disk_size"`
@@ -71,9 +100,13 @@ type Config struct {
MountPartition string `mapstructure:"mount_partition"`
PreMountCommands []string `mapstructure:"pre_mount_commands"`
PostMountCommands []string `mapstructure:"post_mount_commands"`
-
- SSHKeys []string `mapstructure:"ssh_keys"`
- UserData string `mapstructure:"user_data"`
+ // List of SSH keys by name or id to be added
+ // to the server on launch.
+ SSHKeys []string `mapstructure:"ssh_keys" required:"false"`
+ // User data to launch with the server. Packer will not
+ // automatically wait for a user script to finish before shutting down the
+ // instance, this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/hyperv/common/output_config.go b/builder/hyperv/common/output_config.go
index 14eced008..94a4f0ed6 100644
--- a/builder/hyperv/common/output_config.go
+++ b/builder/hyperv/common/output_config.go
@@ -8,7 +8,14 @@ import (
)
type OutputConfig struct {
- OutputDir string `mapstructure:"output_directory"`
+ // This setting specifies the directory that
+ // artifacts from the build, such as the virtual machine files and disks,
+ // will be output to. The path to the directory may be relative or
+ // absolute. If relative, the path is relative to the working directory
+ // packer is executed from. This directory must not exist or, if
+ // created, must be empty prior to running the builder. By default this is
+ // "output-BUILDNAME" where "BUILDNAME" is the name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
}
func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error {
diff --git a/builder/hyperv/common/shutdown_config.go b/builder/hyperv/common/shutdown_config.go
index faa90268f..f6a2ef9b8 100644
--- a/builder/hyperv/common/shutdown_config.go
+++ b/builder/hyperv/common/shutdown_config.go
@@ -8,8 +8,20 @@ import (
)
type ShutdownConfig struct {
- ShutdownCommand string `mapstructure:"shutdown_command"`
- RawShutdownTimeout string `mapstructure:"shutdown_timeout"`
+ // The command to use to gracefully shut down
+ // the machine once all provisioning is complete. By default this is an
+ // empty string, which tells Packer to just forcefully shut down the
+ // machine. This setting can be safely omitted if for example, a shutdown
+ // command to gracefully halt the machine is configured inside a
+ // provisioning script. If one or more scripts require a reboot it is
+ // suggested to leave this blank (since reboots may fail) and instead
+ // specify the final shutdown command in your last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // The amount of time to wait after executing
+ // the shutdown_command for the virtual machine to actually shut down.
+ // If the machine doesn't shut down in this time it is considered an
+ // error. By default, the time out is "5m" (five minutes).
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
}
diff --git a/builder/hyperv/iso/builder.go b/builder/hyperv/iso/builder.go
index a064bc0e7..9af93c6f5 100644
--- a/builder/hyperv/iso/builder.go
+++ b/builder/hyperv/iso/builder.go
@@ -58,63 +58,133 @@ type Config struct {
hypervcommon.OutputConfig `mapstructure:",squash"`
hypervcommon.SSHConfig `mapstructure:",squash"`
hypervcommon.ShutdownConfig `mapstructure:",squash"`
-
- // The size, in megabytes, of the hard disk to create for the VM.
- // By default, this is 130048 (about 127 GB).
- DiskSize uint `mapstructure:"disk_size"`
-
- // The size, in megabytes, of the block size used to create the hard disk.
- // By default, this is 32768 (about 32 MB)
- DiskBlockSize uint `mapstructure:"disk_block_size"`
-
- // The size, in megabytes, of the computer memory in the VM.
- // By default, this is 1024 (about 1 GB).
- RamSize uint `mapstructure:"memory"`
-
- //
- SecondaryDvdImages []string `mapstructure:"secondary_iso_images"`
-
- // Should integration services iso be mounted
- GuestAdditionsMode string `mapstructure:"guest_additions_mode"`
-
- // The path to the integration services iso
- GuestAdditionsPath string `mapstructure:"guest_additions_path"`
-
- // This is the name of the new virtual machine.
- // By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name"`
-
- SwitchName string `mapstructure:"switch_name"`
- SwitchVlanId string `mapstructure:"switch_vlan_id"`
- MacAddress string `mapstructure:"mac_address"`
- VlanId string `mapstructure:"vlan_id"`
- Cpu uint `mapstructure:"cpus"`
- Generation uint `mapstructure:"generation"`
- EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing"`
- UseLegacyNetworkAdapter bool `mapstructure:"use_legacy_network_adapter"`
- EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory"`
- EnableSecureBoot bool `mapstructure:"enable_secure_boot"`
- SecureBootTemplate string `mapstructure:"secure_boot_template"`
- EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions"`
- TempPath string `mapstructure:"temp_path"`
- Version string `mapstructure:"configuration_version"`
- KeepRegistered bool `mapstructure:"keep_registered"`
+ // The size, in megabytes, of the hard disk to create
+ // for the VM. By default, this is 40 GB.
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The block size of the VHD to be created.
+ // Recommended disk block size for Linux hyper-v guests is 1 MiB. This
+ // defaults to "32 MiB".
+ DiskBlockSize uint `mapstructure:"disk_block_size" required:"false"`
+ // The amount, in megabytes, of RAM to assign to the
+ // VM. By default, this is 1 GB.
+ RamSize uint `mapstructure:"memory" required:"false"`
+ // A list of ISO paths to
+ // attach to a VM when it is booted. This is most useful for unattended
+ // Windows installs, which look for an Autounattend.xml file on removable
+ // media. By default, no secondary ISO will be attached.
+ SecondaryDvdImages []string `mapstructure:"secondary_iso_images" required:"false"`
+ // If set to attach then attach and
+ // mount the ISO image specified in guest_additions_path. If set to
+ // none then guest additions are not attached and mounted; This is the
+ // default.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // The path to the ISO image for guest
+ // additions.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
+ // This is the name of the new virtual machine,
+ // without the file extension. By default this is "packer-BUILDNAME",
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
+ // The name of the switch to connect the virtual
+ // machine to. By default, leaving this value unset will cause Packer to
+ // try and determine the switch to use by looking for an external switch
+ // that is up and running.
+ SwitchName string `mapstructure:"switch_name" required:"false"`
+ // This is the VLAN of the virtual switch's
+ // network card. By default none is set. If none is set then a VLAN is not
+ // set on the switch's network card. If this value is set it should match
+ // the VLAN specified in by vlan_id.
+ SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
+ // This allows a specific MAC address to be used on
+ // the default virtual network card. The MAC address must be a string with
+ // no delimiters, for example "0000deadbeef".
+ MacAddress string `mapstructure:"mac_address" required:"false"`
+ // This is the VLAN of the virtual machine's network
+ // card for the new virtual machine. By default none is set. If none is set
+ // then VLANs are not set on the virtual machine's network card.
+ VlanId string `mapstructure:"vlan_id" required:"false"`
+ // The number of CPUs the virtual machine should use. If
+ // this isn't specified, the default is 1 CPU.
+ Cpu uint `mapstructure:"cpus" required:"false"`
+ // The Hyper-V generation for the virtual machine. By
+ // default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ // floppy drives. In this scenario use secondary_iso_images instead. Hard
+ // drives and DVD drives will also be SCSI and not IDE.
+ Generation uint `mapstructure:"generation" required:"false"`
+ // If true enable MAC address spoofing
+ // for the virtual machine. This defaults to false.
+ EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
+ // If true use a legacy network adapter as the NIC.
+ // This defaults to false. A legacy network adapter is fully emulated NIC, and is thus
+ // supported by various exotic operating systems, but this emulation requires
+ // additional overhead and should only be used if absolutely necessary.
+ UseLegacyNetworkAdapter bool `mapstructure:"use_legacy_network_adapter" required:"false"`
+ // If true enable dynamic memory for
+ // the virtual machine. This defaults to false.
+ EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
+ // If true enable secure boot for the
+ // virtual machine. This defaults to false. See secure_boot_template
+ // below for additional settings.
+ EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
+ // The secure boot template to be
+ // configured. Valid values are "MicrosoftWindows" (Windows) or
+ // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+ SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
+ // If true enable
+ // virtualization extensions for the virtual machine. This defaults to
+ // false. For nested virtualization you need to enable MAC spoofing,
+ // disable dynamic memory and have at least 4GB of RAM assigned to the
+ // virtual machine.
+ EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
+ // The location under which Packer will create a
+ // directory to house all the VM files and folders during the build.
+ // By default %TEMP% is used which, for most systems, will evaluate to
+ // %USERPROFILE%/AppData/Local/Temp.
+ TempPath string `mapstructure:"temp_path" required:"false"`
+ // This allows you to set the vm version when
+ // calling New-VM to generate the vm.
+ Version string `mapstructure:"configuration_version" required:"false"`
+ // If "true", Packer will not delete the VM from
+ // The Hyper-V manager.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
Communicator string `mapstructure:"communicator"`
-
- AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
-
- SkipCompaction bool `mapstructure:"skip_compaction"`
-
- SkipExport bool `mapstructure:"skip_export"`
-
- // Use differencing disk
- DifferencingDisk bool `mapstructure:"differencing_disk"`
-
- // Create the VM with a Fixed VHD format disk instead of Dynamic VHDX
- FixedVHD bool `mapstructure:"use_fixed_vhd_format"`
-
- Headless bool `mapstructure:"headless"`
+ // The size or sizes of any
+ // additional hard disks for the VM in megabytes. If this is not specified
+ // then the VM will only contain a primary hard disk. Additional drives
+ // will be attached to the SCSI interface only. The builder uses
+ // expandable rather than fixed-size virtual hard disks, so the actual
+ // file representing the disk will not use the full size unless it is
+ // full.
+ AdditionalDiskSize []uint `mapstructure:"disk_additional_size" required:"false"`
+ // If true skip compacting the hard disk for
+ // the virtual machine when exporting. This defaults to false.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // If true Packer will skip the export of the VM.
+ // If you are interested only in the VHD/VHDX files, you can enable this
+ // option. The resulting VHD/VHDX file will be output to
+ // /Virtual Hard Disks. By default this option is false
+ // and Packer will export the VM to output_directory.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // If true enables differencing disks. Only
+ // the changes will be written to the new disk. This is especially useful if
+ // your source is a VHD/VHDX. This defaults to false.
+ DifferencingDisk bool `mapstructure:"differencing_disk" required:"false"`
+ // If true, creates the boot disk on the
+ // virtual machine as a fixed VHD format disk. The default is false, which
+ // creates a dynamic VHDX format disk. This option requires setting
+ // generation to 1, skip_compaction to true, and
+ // differencing_disk to false. Additionally, any value entered for
+ // disk_block_size will be ignored. The most likely use case for this
+ // option is outputting a disk that is in the format required for upload to
+ // Azure.
+ FixedVHD bool `mapstructure:"use_fixed_vhd_format" required:"false"`
+ // Packer defaults to building Hyper-V virtual
+ // machines by launching a GUI that shows the console of the machine being
+ // built. When this value is set to true, the machine will start without a
+ // console.
+ Headless bool `mapstructure:"headless" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/hyperv/vmcx/builder.go b/builder/hyperv/vmcx/builder.go
index e152294be..e34f33698 100644
--- a/builder/hyperv/vmcx/builder.go
+++ b/builder/hyperv/vmcx/builder.go
@@ -48,62 +48,124 @@ type Config struct {
hypervcommon.OutputConfig `mapstructure:",squash"`
hypervcommon.SSHConfig `mapstructure:",squash"`
hypervcommon.ShutdownConfig `mapstructure:",squash"`
-
- // The size, in megabytes, of the computer memory in the VM.
- // By default, this is 1024 (about 1 GB).
- RamSize uint `mapstructure:"memory"`
-
- //
- SecondaryDvdImages []string `mapstructure:"secondary_iso_images"`
-
- // Should integration services iso be mounted
- GuestAdditionsMode string `mapstructure:"guest_additions_mode"`
-
- // The path to the integration services iso
- GuestAdditionsPath string `mapstructure:"guest_additions_path"`
+ // The amount, in megabytes, of RAM to assign to the
+ // VM. By default, this is 1 GB.
+ RamSize uint `mapstructure:"memory" required:"false"`
+ // A list of ISO paths to
+ // attach to a VM when it is booted. This is most useful for unattended
+ // Windows installs, which look for an Autounattend.xml file on removable
+ // media. By default, no secondary ISO will be attached.
+ SecondaryDvdImages []string `mapstructure:"secondary_iso_images" required:"false"`
+ // If set to attach then attach and
+ // mount the ISO image specified in guest_additions_path. If set to
+ // none then guest additions are not attached and mounted; This is the
+ // default.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // The path to the ISO image for guest
+ // additions.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
// This is the path to a directory containing an exported virtual machine.
CloneFromVMCXPath string `mapstructure:"clone_from_vmcx_path"`
// This is the name of the virtual machine to clone from.
CloneFromVMName string `mapstructure:"clone_from_vm_name"`
-
- // This is the name of the snapshot to clone from. A blank snapshot name will use the latest snapshot.
- CloneFromSnapshotName string `mapstructure:"clone_from_snapshot_name"`
-
- // This will clone all snapshots if true. It will clone latest snapshot if false.
- CloneAllSnapshots bool `mapstructure:"clone_all_snapshots"`
-
- // This is the name of the new virtual machine.
- // By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name"`
-
- // Use differencing disk
- DifferencingDisk bool `mapstructure:"differencing_disk"`
-
- SwitchName string `mapstructure:"switch_name"`
- CompareCopy bool `mapstructure:"copy_in_compare"`
- SwitchVlanId string `mapstructure:"switch_vlan_id"`
- MacAddress string `mapstructure:"mac_address"`
- VlanId string `mapstructure:"vlan_id"`
- Cpu uint `mapstructure:"cpus"`
- Generation uint `mapstructure:"generation"`
- EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing"`
- EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory"`
- EnableSecureBoot bool `mapstructure:"enable_secure_boot"`
- SecureBootTemplate string `mapstructure:"secure_boot_template"`
- EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions"`
- TempPath string `mapstructure:"temp_path"`
- Version string `mapstructure:"configuration_version"`
- KeepRegistered bool `mapstructure:"keep_registered"`
+ // The name of a snapshot in the
+ // source machine to use as a starting point for the clone. If the value
+ // given is an empty string, the last snapshot present in the source will
+ // be chosen as the starting point for the new VM.
+ CloneFromSnapshotName string `mapstructure:"clone_from_snapshot_name" required:"false"`
+ // If set to true all snapshots
+ // present in the source machine will be copied when the machine is
+ // cloned. The final result of the build will be an exported virtual
+ // machine that contains all the snapshots of the parent.
+ CloneAllSnapshots bool `mapstructure:"clone_all_snapshots" required:"false"`
+ // This is the name of the new virtual machine,
+ // without the file extension. By default this is "packer-BUILDNAME",
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
+ // If true enables differencing disks. Only
+ // the changes will be written to the new disk. This is especially useful if
+ // your source is a VHD/VHDX. This defaults to false.
+ DifferencingDisk bool `mapstructure:"differencing_disk" required:"false"`
+ // The name of the switch to connect the virtual
+ // machine to. By default, leaving this value unset will cause Packer to
+ // try and determine the switch to use by looking for an external switch
+ // that is up and running.
+ SwitchName string `mapstructure:"switch_name" required:"false"`
+ // When cloning a vm to build from, we run a powershell
+ // Compare-VM command, which, depending on your version of Windows, may need
+ // the "Copy" flag to be set to true or false. Defaults to "false". Command:
+ CompareCopy bool `mapstructure:"copy_in_compare" required:"false"`
+ // This is the VLAN of the virtual switch's
+ // network card. By default none is set. If none is set then a VLAN is not
+ // set on the switch's network card. If this value is set it should match
+ // the VLAN specified by vlan_id.
+ SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
+ // This allows a specific MAC address to be used on
+ // the default virtual network card. The MAC address must be a string with
+ // no delimiters, for example "0000deadbeef".
+ MacAddress string `mapstructure:"mac_address" required:"false"`
+ // This is the VLAN of the virtual machine's network
+ // card for the new virtual machine. By default none is set. If none is set
+ // then VLANs are not set on the virtual machine's network card.
+ VlanId string `mapstructure:"vlan_id" required:"false"`
+ // The number of CPUs the virtual machine should use. If
+ // this isn't specified, the default is 1 CPU.
+ Cpu uint `mapstructure:"cpus" required:"false"`
+ // The Hyper-V generation for the virtual machine. By
+ // default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ // floppy drives. In this scenario use secondary_iso_images instead. Hard
+ // drives and DVD drives will also be SCSI and not IDE.
+ Generation uint `mapstructure:"generation" required:"false"`
+ // If true enable MAC address spoofing
+ // for the virtual machine. This defaults to false.
+ EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
+ // If true enable dynamic memory for
+ // the virtual machine. This defaults to false.
+ EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
+ // If true enable secure boot for the
+ // virtual machine. This defaults to false. See secure_boot_template
+ // below for additional settings.
+ EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
+ // The secure boot template to be
+ // configured. Valid values are "MicrosoftWindows" (Windows) or
+ // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+ SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
+ // If true enable
+ // virtualization extensions for the virtual machine. This defaults to
+ // false. For nested virtualization you need to enable MAC spoofing,
+ // disable dynamic memory and have at least 4GB of RAM assigned to the
+ // virtual machine.
+ EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
+ // The location under which Packer will create a
+ // directory to house all the VM files and folders during the build.
+ // By default %TEMP% is used which, for most systems, will evaluate to
+ // %USERPROFILE%/AppData/Local/Temp.
+ TempPath string `mapstructure:"temp_path" required:"false"`
+ // This allows you to set the vm version when
+ // calling New-VM to generate the vm.
+ Version string `mapstructure:"configuration_version" required:"false"`
+ // If "true", Packer will not delete the VM from
+ // The Hyper-V manager.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
Communicator string `mapstructure:"communicator"`
-
- SkipCompaction bool `mapstructure:"skip_compaction"`
-
- SkipExport bool `mapstructure:"skip_export"`
-
- Headless bool `mapstructure:"headless"`
+ // If true skip compacting the hard disk for
+ // the virtual machine when exporting. This defaults to false.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // If true Packer will skip the export of the VM.
+ // If you are interested only in the VHD/VHDX files, you can enable this
+ // option. The resulting VHD/VHDX file will be output to
+ // /Virtual Hard Disks. By default this option is false
+ // and Packer will export the VM to output_directory.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // Packer defaults to building Hyper-V virtual
+ // machines by launching a GUI that shows the console of the machine being
+ // built. When this value is set to true, the machine will start without a
+ // console.
+ Headless bool `mapstructure:"headless" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/lxc/config.go b/builder/lxc/config.go
index b0736a63f..7f356b16e 100644
--- a/builder/lxc/config.go
+++ b/builder/lxc/config.go
@@ -14,18 +14,52 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
- ConfigFile string `mapstructure:"config_file"`
- OutputDir string `mapstructure:"output_directory"`
- ContainerName string `mapstructure:"container_name"`
- CommandWrapper string `mapstructure:"command_wrapper"`
- RawInitTimeout string `mapstructure:"init_timeout"`
- CreateOptions []string `mapstructure:"create_options"`
- StartOptions []string `mapstructure:"start_options"`
- AttachOptions []string `mapstructure:"attach_options"`
- Name string `mapstructure:"template_name"`
- Parameters []string `mapstructure:"template_parameters"`
- EnvVars []string `mapstructure:"template_environment_vars"`
- TargetRunlevel int `mapstructure:"target_runlevel"`
+ // The path to the lxc configuration file.
+ ConfigFile string `mapstructure:"config_file" required:"true"`
+ // The directory in which to save the exported
+ // tar.gz. Defaults to output- in the current directory.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
+ // The name of the LXC container. Usually stored
+ // in /var/lib/lxc/containers/. Defaults to
+ // packer-.
+ ContainerName string `mapstructure:"container_name" required:"false"`
+ // Allows you to specify a wrapper command, such
+ // as ssh so you can execute packer builds on a remote host. Defaults to
+ // Empty.
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // The timeout in seconds to wait for the
+ // container to start. Defaults to 20 seconds.
+ RawInitTimeout string `mapstructure:"init_timeout" required:"false"`
+ // Options to pass to lxc-create. For
+ // instance, you can specify a custom LXC container configuration file with
+ // ["-f", "/path/to/lxc.conf"]. Defaults to []. See man 1 lxc-create for
+ // available options.
+ CreateOptions []string `mapstructure:"create_options" required:"false"`
+ // Options to pass to lxc-start. For
+ // instance, you can override parameters from the LXC container configuration
+ // file via ["--define", "KEY=VALUE"]. Defaults to []. See
+ // man 1 lxc-start for available options.
+ StartOptions []string `mapstructure:"start_options" required:"false"`
+ // Options to pass to lxc-attach. For
+ // instance, you can prevent the container from inheriting the host machine's
+ // environment by specifying ["--clear-env"]. Defaults to []. See
+ // man 1 lxc-attach for available options.
+ AttachOptions []string `mapstructure:"attach_options" required:"false"`
+ // The LXC template name to use.
+ Name string `mapstructure:"template_name" required:"true"`
+ // Options to pass to the given
+ // lxc-template command, usually located in
+ // /usr/share/lxc/templates/lxc-. Note: This gets passed as
+ // ARGV to the template command. Ensure you have an array of strings, as a
+ // single string with spaces probably won't work. Defaults to [].
+ Parameters []string `mapstructure:"template_parameters" required:"false"`
+ // Environmental variables to
+ // use to build the template with.
+ EnvVars []string `mapstructure:"template_environment_vars" required:"true"`
+ // The minimum run level to wait for the
+ // container to reach. Note some distributions (Ubuntu) simulate run levels
+ // and may report 5 rather than 3.
+ TargetRunlevel int `mapstructure:"target_runlevel" required:"false"`
InitTimeout time.Duration
ctx interpolate.Context
diff --git a/builder/lxd/config.go b/builder/lxd/config.go
index bcd3aea80..d8b265a1a 100644
--- a/builder/lxd/config.go
+++ b/builder/lxd/config.go
@@ -12,14 +12,30 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
- OutputImage string `mapstructure:"output_image"`
+ // The name of the output artifact. Defaults to
+ // name.
+ OutputImage string `mapstructure:"output_image" required:"false"`
ContainerName string `mapstructure:"container_name"`
- CommandWrapper string `mapstructure:"command_wrapper"`
- Image string `mapstructure:"image"`
+ // Lets you prefix all builder commands, such as
+ // with ssh for a remote build host. Defaults to "".
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // The source image to use when creating the build
+ // container. This can be a (local or remote) image (name or fingerprint).
+ // E.G. my-base-image, ubuntu-daily:x, 08fababf6f27, ...
+ Image string `mapstructure:"image" required:"true"`
Profile string `mapstructure:"profile"`
- InitSleep string `mapstructure:"init_sleep"`
- PublishProperties map[string]string `mapstructure:"publish_properties"`
- LaunchConfig map[string]string `mapstructure:"launch_config"`
+ // The number of seconds to sleep between launching
+ // the LXD instance and provisioning it; defaults to 3 seconds.
+ InitSleep string `mapstructure:"init_sleep" required:"false"`
+ // Pass key values to the publish
+ // step to be set as properties on the output image. This is most helpful to
+ // set the description, but can be used to set anything needed. See
+ // https://stgraber.org/2016/03/30/lxd-2-0-image-management-512/
+ // for more properties.
+ PublishProperties map[string]string `mapstructure:"publish_properties" required:"false"`
+ // List of key/value pairs you wish to
+ // pass to lxc launch via --config. Defaults to empty.
+ LaunchConfig map[string]string `mapstructure:"launch_config" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/ncloud/config.go b/builder/ncloud/config.go
index e16f480c0..73a20a5b3 100644
--- a/builder/ncloud/config.go
+++ b/builder/ncloud/config.go
@@ -18,16 +18,39 @@ type Config struct {
AccessKey string `mapstructure:"access_key"`
SecretKey string `mapstructure:"secret_key"`
- ServerImageProductCode string `mapstructure:"server_image_product_code"`
- ServerProductCode string `mapstructure:"server_product_code"`
- MemberServerImageNo string `mapstructure:"member_server_image_no"`
- ServerImageName string `mapstructure:"server_image_name"`
- ServerImageDescription string `mapstructure:"server_image_description"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- BlockStorageSize int `mapstructure:"block_storage_size"`
- Region string `mapstructure:"region"`
- AccessControlGroupConfigurationNo string `mapstructure:"access_control_group_configuration_no"`
+ // Product code of an image to create.
+ // (member_server_image_no is required if not specified)
+ ServerImageProductCode string `mapstructure:"server_image_product_code" required:"true"`
+ // Product (spec) code to create.
+ ServerProductCode string `mapstructure:"server_product_code" required:"true"`
+ // Previous image code. If there is an
+ // image previously created, it can be used to create a new image.
+ // (server_image_product_code is required if not specified)
+ MemberServerImageNo string `mapstructure:"member_server_image_no" required:"false"`
+ // Name of an image to create.
+ ServerImageName string `mapstructure:"server_image_name" required:"false"`
+ // Description of an image to create.
+ ServerImageDescription string `mapstructure:"server_image_description" required:"false"`
+ // User data to apply when launching the instance. Note
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // You can add block storage ranging from 10
+ // GB to 2000 GB, in increments of 10 GB.
+ BlockStorageSize int `mapstructure:"block_storage_size" required:"false"`
+ // Name of the region where you want to create an image.
+ // (default: Korea)
+ Region string `mapstructure:"region" required:"false"`
+ // This is used to allow
+ // winrm access when you create a Windows server. An ACG that specifies an
+ // access source (0.0.0.0/0) and allowed port (5985) must be created in
+ // advance.
+ AccessControlGroupConfigurationNo string `mapstructure:"access_control_group_configuration_no" required:"false"`
Comm communicator.Config `mapstructure:",squash"`
ctx *interpolate.Context
diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go
index ad63a396c..2b9c27d86 100644
--- a/builder/openstack/access_config.go
+++ b/builder/openstack/access_config.go
@@ -16,25 +16,76 @@ import (
// AccessConfig is for common configuration related to openstack access
type AccessConfig struct {
- Username string `mapstructure:"username"`
+ // The username or id used to connect to
+ // the OpenStack service. If not specified, Packer will use the environment
+ // variable OS_USERNAME or OS_USERID, if set. This is not required if
+ // using access token or application credential instead of password, or if using
+ // cloud.yaml.
+ Username string `mapstructure:"username" required:"true"`
UserID string `mapstructure:"user_id"`
- Password string `mapstructure:"password"`
- IdentityEndpoint string `mapstructure:"identity_endpoint"`
- TenantID string `mapstructure:"tenant_id"`
+ // The password used to connect to the OpenStack
+ // service. If not specified, Packer will use the environment variables
+ // OS_PASSWORD, if set. This is not required if using access token or
+ // application credential instead of password, or if using cloud.yaml.
+ Password string `mapstructure:"password" required:"true"`
+ // The URL to the OpenStack Identity service.
+ // If not specified, Packer will use the environment variables OS_AUTH_URL,
+ // if set. This is not required if using cloud.yaml.
+ IdentityEndpoint string `mapstructure:"identity_endpoint" required:"true"`
+ // The tenant ID or name to boot the
+ // instance into. Some OpenStack installations require this. If not specified,
+ // Packer will use the environment variable OS_TENANT_NAME or
+ // OS_TENANT_ID, if set. Tenant is also called Project in later versions of
+ // OpenStack.
+ TenantID string `mapstructure:"tenant_id" required:"false"`
TenantName string `mapstructure:"tenant_name"`
DomainID string `mapstructure:"domain_id"`
- DomainName string `mapstructure:"domain_name"`
- Insecure bool `mapstructure:"insecure"`
- Region string `mapstructure:"region"`
- EndpointType string `mapstructure:"endpoint_type"`
- CACertFile string `mapstructure:"cacert"`
- ClientCertFile string `mapstructure:"cert"`
- ClientKeyFile string `mapstructure:"key"`
- Token string `mapstructure:"token"`
- ApplicationCredentialName string `mapstructure:"application_credential_name"`
- ApplicationCredentialID string `mapstructure:"application_credential_id"`
- ApplicationCredentialSecret string `mapstructure:"application_credential_secret"`
- Cloud string `mapstructure:"cloud"`
+ // The Domain name or ID you are
+ // authenticating with. OpenStack installations require this if identity v3 is
+ // used. Packer will use the environment variable OS_DOMAIN_NAME or
+ // OS_DOMAIN_ID, if set.
+ DomainName string `mapstructure:"domain_name" required:"false"`
+ // Whether or not the connection to OpenStack can be
+ // done over an insecure connection. By default this is false.
+ Insecure bool `mapstructure:"insecure" required:"false"`
+ // The name of the region, such as "DFW", in which to
+ // launch the server to create the image. If not specified, Packer will use
+ // the environment variable OS_REGION_NAME, if set.
+ Region string `mapstructure:"region" required:"false"`
+ // The endpoint type to use. Can be any of
+ // "internal", "internalURL", "admin", "adminURL", "public", and "publicURL".
+ // By default this is "public".
+ EndpointType string `mapstructure:"endpoint_type" required:"false"`
+ // Custom CA certificate file path. If omitted the
+ // OS_CACERT environment variable can be used.
+ CACertFile string `mapstructure:"cacert" required:"false"`
+ // Client certificate file path for SSL client
+ // authentication. If omitted the OS_CERT environment variable can be used.
+ ClientCertFile string `mapstructure:"cert" required:"false"`
+ // Client private key file path for SSL client
+ // authentication. If omitted the OS_KEY environment variable can be used.
+ ClientKeyFile string `mapstructure:"key" required:"false"`
+ // the token (id) to use with token based authorization.
+ // Packer will use the environment variable OS_TOKEN, if set.
+ Token string `mapstructure:"token" required:"false"`
+ // The application credential name to
+ // use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_NAME, if set.
+ ApplicationCredentialName string `mapstructure:"application_credential_name" required:"false"`
+ // The application credential id to
+ // use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_ID, if set.
+ ApplicationCredentialID string `mapstructure:"application_credential_id" required:"false"`
+ // The application credential secret
+ // to use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_SECRET, if set.
+ ApplicationCredentialSecret string `mapstructure:"application_credential_secret" required:"false"`
+ // An entry in a clouds.yaml file. See the OpenStack
+ // os-client-config
+ // documentation
+ // for more information about clouds.yaml files. If omitted, the OS_CLOUD
+ // environment variable is used.
+ Cloud string `mapstructure:"cloud" required:"false"`
osClient *gophercloud.ProviderClient
}
diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go
index ffc9c6896..1f907f1c8 100644
--- a/builder/openstack/image_config.go
+++ b/builder/openstack/image_config.go
@@ -10,12 +10,24 @@ import (
// ImageConfig is for common configuration related to creating Images.
type ImageConfig struct {
- ImageName string `mapstructure:"image_name"`
- ImageMetadata map[string]string `mapstructure:"metadata"`
- ImageVisibility imageservice.ImageVisibility `mapstructure:"image_visibility"`
- ImageMembers []string `mapstructure:"image_members"`
- ImageDiskFormat string `mapstructure:"image_disk_format"`
- ImageTags []string `mapstructure:"image_tags"`
+ // The name of the resulting image.
+ ImageName string `mapstructure:"image_name" required:"true"`
+ // Glance metadata that will be
+ // applied to the image.
+ ImageMetadata map[string]string `mapstructure:"metadata" required:"false"`
+ // One of "public", "private", "shared", or
+ // "community".
+ ImageVisibility imageservice.ImageVisibility `mapstructure:"image_visibility" required:"false"`
+ // List of members to add to the image
+ // after creation. An image member is usually a project (also called the
+ // "tenant") with whom the image is shared.
+ ImageMembers []string `mapstructure:"image_members" required:"false"`
+ // Disk format of the resulting image. This
+ // option works if use_blockstorage_volume is true.
+ ImageDiskFormat string `mapstructure:"image_disk_format" required:"false"`
+ // List of tags to add to the image after
+ // creation.
+ ImageTags []string `mapstructure:"image_tags" required:"false"`
}
func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go
index bae71753f..8d1aef3a3 100644
--- a/builder/openstack/run_config.go
+++ b/builder/openstack/run_config.go
@@ -14,46 +14,121 @@ import (
// image and details on how to access that launched image.
type RunConfig struct {
Comm communicator.Config `mapstructure:",squash"`
-
- SourceImage string `mapstructure:"source_image"`
- SourceImageName string `mapstructure:"source_image_name"`
- SourceImageFilters ImageFilter `mapstructure:"source_image_filter"`
- Flavor string `mapstructure:"flavor"`
- AvailabilityZone string `mapstructure:"availability_zone"`
- RackconnectWait bool `mapstructure:"rackconnect_wait"`
- FloatingIPNetwork string `mapstructure:"floating_ip_network"`
- FloatingIP string `mapstructure:"floating_ip"`
- ReuseIPs bool `mapstructure:"reuse_ips"`
- SecurityGroups []string `mapstructure:"security_groups"`
- Networks []string `mapstructure:"networks"`
- Ports []string `mapstructure:"ports"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- InstanceName string `mapstructure:"instance_name"`
- InstanceMetadata map[string]string `mapstructure:"instance_metadata"`
- ForceDelete bool `mapstructure:"force_delete"`
-
- ConfigDrive bool `mapstructure:"config_drive"`
-
- // Used for BC, value will be passed to the "floating_ip_network"
- FloatingIPPool string `mapstructure:"floating_ip_pool"`
-
- UseBlockStorageVolume bool `mapstructure:"use_blockstorage_volume"`
- VolumeName string `mapstructure:"volume_name"`
- VolumeType string `mapstructure:"volume_type"`
- VolumeSize int `mapstructure:"volume_size"`
- VolumeAvailabilityZone string `mapstructure:"volume_availability_zone"`
+ // The ID or full URL to the base image to use. This
+ // is the image that will be used to launch a new server and provision it.
+ // Unless you specify completely custom SSH settings, the source image must
+ // have cloud-init installed so that the keypair gets assigned properly.
+ SourceImage string `mapstructure:"source_image" required:"true"`
+ // The name of the base image to use. This is
+ // an alternative way of providing source_image and only either of them can
+ // be specified.
+ SourceImageName string `mapstructure:"source_image_name" required:"true"`
+ // The search filters for determining the base
+ // image to use. This is an alternative way of providing source_image and
+ // only one of these methods can be used. source_image will override the
+ // filters.
+ SourceImageFilters ImageFilter `mapstructure:"source_image_filter" required:"true"`
+ // The ID, name, or full URL for the desired flavor for
+ // the server to be created.
+ Flavor string `mapstructure:"flavor" required:"true"`
+ // The availability zone to launch the server
+ // in. If this isn't specified, the default enforced by your OpenStack cluster
+ // will be used. This may be required for some OpenStack clusters.
+ AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
+ // For rackspace, whether or not to wait for
+ // Rackconnect to assign the machine an IP address before connecting via SSH.
+ // Defaults to false.
+ RackconnectWait bool `mapstructure:"rackconnect_wait" required:"false"`
+ // The ID or name of an external network that
+ // can be used for creation of a new floating IP.
+ FloatingIPNetwork string `mapstructure:"floating_ip_network" required:"false"`
+ // A specific floating IP to assign to this instance.
+ FloatingIP string `mapstructure:"floating_ip" required:"false"`
+ // Whether or not to attempt to reuse existing
+ // unassigned floating ips in the project before allocating a new one. Note
+ // that it is not possible to safely do this concurrently, so if you are
+ // running multiple openstack builds concurrently, or if other processes are
+ // assigning and using floating IPs in the same openstack project while packer
+ // is running, you should not set this to true. Defaults to false.
+ ReuseIPs bool `mapstructure:"reuse_ips" required:"false"`
+ // A list of security groups by name to
+ // add to this instance.
+ SecurityGroups []string `mapstructure:"security_groups" required:"false"`
+ // A list of networks by UUID to attach to
+ // this instance.
+ Networks []string `mapstructure:"networks" required:"false"`
+ // A list of ports by UUID to attach to this
+ // instance.
+ Ports []string `mapstructure:"ports" required:"false"`
+ // User data to apply when launching the instance. Note
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // Path to a file that will be used for the user
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // Name that is applied to the server instance
+ // created by Packer. If this isn't specified, the default is same as
+ // image_name.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // Metadata that is
+ // applied to the server instance created by Packer. Also called server
+ // properties in some documentation. The strings have a max size of 255 bytes
+ // each.
+ InstanceMetadata map[string]string `mapstructure:"instance_metadata" required:"false"`
+ // Whether to force the OpenStack instance to be
+ // forcefully deleted. This is useful for environments that have
+ // reclaim / soft deletion enabled. By default this is false.
+ ForceDelete bool `mapstructure:"force_delete" required:"false"`
+ // Whether or not nova should use ConfigDrive for
+ // cloud-init metadata.
+ ConfigDrive bool `mapstructure:"config_drive" required:"false"`
+ // Deprecated: use floating_ip_network
+ // instead.
+ FloatingIPPool string `mapstructure:"floating_ip_pool" required:"false"`
+ // Use Block Storage service volume for
+ // the instance root volume instead of Compute service local volume (default).
+ UseBlockStorageVolume bool `mapstructure:"use_blockstorage_volume" required:"false"`
+ // Name of the Block Storage service volume. If this
+ // isn't specified, random string will be used.
+ VolumeName string `mapstructure:"volume_name" required:"false"`
+ // Type of the Block Storage service volume. If this
+ // isn't specified, the default enforced by your OpenStack cluster will be
+ // used.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
+ // Size of the Block Storage service volume in GB. If
+ // this isn't specified, it is set to source image min disk value (if set) or
+ // calculated from the source image bytes size. Note that in some cases this
+ // needs to be specified, if use_blockstorage_volume is true.
+ VolumeSize int `mapstructure:"volume_size" required:"false"`
+ // Availability zone of the Block
+ // Storage service volume. If omitted, Compute instance availability zone will
+ // be used. If both of Compute instance and Block Storage volume availability
+ // zones aren't specified, the default enforced by your OpenStack cluster will
+ // be used.
+ VolumeAvailabilityZone string `mapstructure:"volume_availability_zone" required:"false"`
// Not really used, but here for BC
OpenstackProvider string `mapstructure:"openstack_provider"`
- UseFloatingIp bool `mapstructure:"use_floating_ip"`
+ // Deprecated: use floating_ip or
+ // floating_ip_pool instead.
+ UseFloatingIp bool `mapstructure:"use_floating_ip" required:"false"`
sourceImageOpts images.ListOpts
}
type ImageFilter struct {
- Filters ImageFilterOptions `mapstructure:"filters"`
- MostRecent bool `mapstructure:"most_recent"`
+ // filters used to select a source_image.
+ // NOTE: This will fail unless exactly one image is returned, or
+ // most_recent is set to true. Of the filters described in
+ // ImageService, the
+ // following are valid:
+ Filters ImageFilterOptions `mapstructure:"filters" required:"false"`
+ // Selects the newest created image when true.
+ // This is most useful for selecting a daily distro build.
+ MostRecent bool `mapstructure:"most_recent" required:"false"`
}
type ImageFilterOptions struct {
diff --git a/builder/parallels/common/hw_config.go b/builder/parallels/common/hw_config.go
index bc51c97a6..3515f3ea6 100644
--- a/builder/parallels/common/hw_config.go
+++ b/builder/parallels/common/hw_config.go
@@ -7,14 +7,18 @@ import (
)
type HWConfig struct {
-
- // cpu information
- CpuCount int `mapstructure:"cpus"`
- MemorySize int `mapstructure:"memory"`
-
- // device presence
- Sound bool `mapstructure:"sound"`
- USB bool `mapstructure:"usb"`
+ // The number of cpus to use for building the VM.
+ // Defaults to 1.
+ CpuCount int `mapstructure:"cpus" required:"false"`
+ // The amount of memory to use for building the VM in
+ // megabytes. Defaults to 512 megabytes.
+ MemorySize int `mapstructure:"memory" required:"false"`
+ // Specifies whether to enable the sound device when
+ // building the VM. Defaults to false.
+ Sound bool `mapstructure:"sound" required:"false"`
+ // Specifies whether to enable the USB bus when building
+ // the VM. Defaults to false.
+ USB bool `mapstructure:"usb" required:"false"`
}
func (c *HWConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/parallels/common/output_config.go b/builder/parallels/common/output_config.go
index f2e6cea50..204feffa8 100644
--- a/builder/parallels/common/output_config.go
+++ b/builder/parallels/common/output_config.go
@@ -11,7 +11,13 @@ import (
// OutputConfig contains the configuration for builder's output.
type OutputConfig struct {
- OutputDir string `mapstructure:"output_directory"`
+ // This is the path to the directory where the
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ // name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
}
// Prepare configures the output directory or returns an error if it already exists.
diff --git a/builder/parallels/common/prlctl_config.go b/builder/parallels/common/prlctl_config.go
index 5d42c5ea4..76aebf3e0 100644
--- a/builder/parallels/common/prlctl_config.go
+++ b/builder/parallels/common/prlctl_config.go
@@ -7,7 +7,17 @@ import (
// PrlctlConfig contains the configuration for running "prlctl" commands
// before the VM start.
type PrlctlConfig struct {
- Prlctl [][]string `mapstructure:"prlctl"`
+ // Custom prlctl commands to execute
+ // in order to further customize the virtual machine being created. The value
+ // of this is an array of commands to execute. The commands are executed in the
+ // order defined in the template. For each command, the command is defined
+ // itself as an array of strings, where each string represents a single
+ // argument on the command-line to prlctl (but excluding prlctl itself).
+ // Each arg is treated as a configuration
+ // template, where the Name
+ // variable is replaced with the VM name. More details on how to use prlctl
+ // are below.
+ Prlctl [][]string `mapstructure:"prlctl" required:"false"`
}
// Prepare sets the default value of "Prlctl" property.
diff --git a/builder/parallels/common/prlctl_post_config.go b/builder/parallels/common/prlctl_post_config.go
index 27f5cb234..526d12d29 100644
--- a/builder/parallels/common/prlctl_post_config.go
+++ b/builder/parallels/common/prlctl_post_config.go
@@ -7,7 +7,10 @@ import (
// PrlctlPostConfig contains the configuration for running "prlctl" commands
// in the end of artifact build.
type PrlctlPostConfig struct {
- PrlctlPost [][]string `mapstructure:"prlctl_post"`
+ // Identical to prlctl, except
+ // that it is run after the virtual machine is shutdown, and before the virtual
+ // machine is exported.
+ PrlctlPost [][]string `mapstructure:"prlctl_post" required:"false"`
}
// Prepare sets the default value of "PrlctlPost" property.
diff --git a/builder/parallels/common/prlctl_version_config.go b/builder/parallels/common/prlctl_version_config.go
index ab2641cb3..b31ade19a 100644
--- a/builder/parallels/common/prlctl_version_config.go
+++ b/builder/parallels/common/prlctl_version_config.go
@@ -6,7 +6,12 @@ import (
// PrlctlVersionConfig contains the configuration for `prlctl` version.
type PrlctlVersionConfig struct {
- PrlctlVersionFile string `mapstructure:"prlctl_version_file"`
+ // The path within the virtual machine to
+ // upload a file that contains the prlctl version that was used to create
+ // the machine. This information can be useful for provisioning. By default
+ // this is ".prlctl_version", which will generally upload it into the
+ // home directory.
+ PrlctlVersionFile string `mapstructure:"prlctl_version_file" required:"false"`
}
// Prepare sets the default value of "PrlctlVersionFile" property.
diff --git a/builder/parallels/common/shutdown_config.go b/builder/parallels/common/shutdown_config.go
index 4ebdf405f..f9a8626ef 100644
--- a/builder/parallels/common/shutdown_config.go
+++ b/builder/parallels/common/shutdown_config.go
@@ -9,8 +9,15 @@ import (
// ShutdownConfig contains the configuration for VM shutdown.
type ShutdownConfig struct {
- ShutdownCommand string `mapstructure:"shutdown_command"`
- RawShutdownTimeout string `mapstructure:"shutdown_timeout"`
+ // The command to use to gracefully shut down the
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // The amount of time to wait after executing the
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // "5m", or five minutes.
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
}
diff --git a/builder/parallels/common/tools_config.go b/builder/parallels/common/tools_config.go
index c273d2e87..6a1d8c790 100644
--- a/builder/parallels/common/tools_config.go
+++ b/builder/parallels/common/tools_config.go
@@ -17,9 +17,26 @@ const (
// ToolsConfig contains the builder configuration related to Parallels Tools.
type ToolsConfig struct {
- ParallelsToolsFlavor string `mapstructure:"parallels_tools_flavor"`
- ParallelsToolsGuestPath string `mapstructure:"parallels_tools_guest_path"`
- ParallelsToolsMode string `mapstructure:"parallels_tools_mode"`
+ // The flavor of the Parallels Tools ISO to
+ // install into the VM. Valid values are "win", "lin", "mac", "os2"
+ // and "other". This can be omitted only if parallels_tools_mode
+ // is "disable".
+ ParallelsToolsFlavor string `mapstructure:"parallels_tools_flavor" required:"true"`
+ // The path in the virtual machine to
+ // upload Parallels Tools. This only takes effect if parallels_tools_mode
+ // is "upload". This is a configuration
+ // template that has a single
+ // valid variable: Flavor, which will be the value of
+ // parallels_tools_flavor. By default this is "prl-tools-{{.Flavor}}.iso"
+ // which should upload into the login directory of the user.
+ ParallelsToolsGuestPath string `mapstructure:"parallels_tools_guest_path" required:"false"`
+ // The method by which Parallels Tools are
+ // made available to the guest for installation. Valid options are "upload",
+ // "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
+ // be attached as a CD device to the virtual machine. If the mode is "upload"
+ // the Parallels Tools ISO will be uploaded to the path specified by
+ // parallels_tools_guest_path. The default value is "upload".
+ ParallelsToolsMode string `mapstructure:"parallels_tools_mode" required:"false"`
}
// Prepare validates & sets up configuration options related to Parallels Tools.
diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go
index 287a47361..1ccdd934b 100644
--- a/builder/parallels/iso/builder.go
+++ b/builder/parallels/iso/builder.go
@@ -36,14 +36,44 @@ type Config struct {
parallelscommon.ShutdownConfig `mapstructure:",squash"`
parallelscommon.SSHConfig `mapstructure:",squash"`
parallelscommon.ToolsConfig `mapstructure:",squash"`
-
- DiskSize uint `mapstructure:"disk_size"`
- DiskType string `mapstructure:"disk_type"`
- GuestOSType string `mapstructure:"guest_os_type"`
- HardDriveInterface string `mapstructure:"hard_drive_interface"`
- HostInterfaces []string `mapstructure:"host_interfaces"`
- SkipCompaction bool `mapstructure:"skip_compaction"`
- VMName string `mapstructure:"vm_name"`
+ // The size, in megabytes, of the hard disk to create
+ // for the VM. By default, this is 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The type for image file based virtual disk drives,
+ // defaults to expand. Valid options are expand (expanding disk) that the
+ // image file is small initially and grows in size as you add data to it, and
+ // plain (plain disk) that the image file has a fixed size from the moment it
+ // is created (i.e the space is allocated for the full drive). Plain disks
+ // perform faster than expanding disks. skip_compaction will be set to true
+ // automatically for plain disks.
+ DiskType string `mapstructure:"disk_type" required:"false"`
+ // The guest OS type being installed. By default
+ // this is "other", but you can get dramatic performance improvements by
+ // setting this to the proper value. To view all available values for this run
+ // prlctl create x --distribution list. Setting the correct value hints to
+ // Parallels Desktop how to optimize the virtual hardware to work best with
+ // that operating system.
+ GuestOSType string `mapstructure:"guest_os_type" required:"false"`
+ // The type of controller that the hard
+ // drives are attached to, defaults to "sata". Valid options are "sata", "ide",
+ // and "scsi".
+ HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
+ // A list of which interfaces on the
+ // host should be searched for a IP address. The first IP address found on one
+ // of these will be used as {{ .HTTPIP }} in the boot_command. Defaults to
+ // ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
+ // "ppp0", "ppp1", "ppp2"].
+ HostInterfaces []string `mapstructure:"host_interfaces" required:"false"`
+ // Virtual disk image is compacted at the end of
+ // the build process using prl_disk_tool utility (except for the case that
+ // disk_type is set to plain). In certain rare cases, this might corrupt
+ // the resulting disk image. If you find this to be the case, you can disable
+ // compaction using this configuration value.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // This is the name of the PVM directory for the new
+ // virtual machine, without the file extension. By default this is
+ // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go
index bcca5e724..99b54532b 100644
--- a/builder/parallels/pvm/config.go
+++ b/builder/parallels/pvm/config.go
@@ -24,11 +24,23 @@ type Config struct {
parallelscommon.ShutdownConfig `mapstructure:",squash"`
bootcommand.BootConfig `mapstructure:",squash"`
parallelscommon.ToolsConfig `mapstructure:",squash"`
-
- SourcePath string `mapstructure:"source_path"`
- SkipCompaction bool `mapstructure:"skip_compaction"`
- VMName string `mapstructure:"vm_name"`
- ReassignMAC bool `mapstructure:"reassign_mac"`
+ // The path to a PVM directory that acts as the source
+ // of this build.
+ SourcePath string `mapstructure:"source_path" required:"true"`
+ // Virtual disk image is compacted at the end of
+ // the build process using prl_disk_tool utility (except for the case that
+ // disk_type is set to plain). In certain rare cases, this might corrupt
+ // the resulting disk image. If you find this to be the case, you can disable
+ // compaction using this configuration value.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // This is the name of the PVM directory for the new
+ // virtual machine, without the file extension. By default this is
+ // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
+ // If this is "false" the MAC address of the first
+ // NIC will be reused when imported, else a new MAC address will be
+ // generated by Parallels. Defaults to "false".
+ ReassignMAC bool `mapstructure:"reassign_mac" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index 89e8b6943..55df4c600 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -95,35 +95,134 @@ type Config struct {
bootcommand.VNCConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
common.FloppyConfig `mapstructure:",squash"`
-
- ISOSkipCache bool `mapstructure:"iso_skip_cache"`
- Accelerator string `mapstructure:"accelerator"`
- CpuCount int `mapstructure:"cpus"`
- DiskInterface string `mapstructure:"disk_interface"`
- DiskSize uint `mapstructure:"disk_size"`
- DiskCache string `mapstructure:"disk_cache"`
- DiskDiscard string `mapstructure:"disk_discard"`
- DetectZeroes string `mapstructure:"disk_detect_zeroes"`
- SkipCompaction bool `mapstructure:"skip_compaction"`
- DiskCompression bool `mapstructure:"disk_compression"`
- Format string `mapstructure:"format"`
- Headless bool `mapstructure:"headless"`
- DiskImage bool `mapstructure:"disk_image"`
- UseBackingFile bool `mapstructure:"use_backing_file"`
- MachineType string `mapstructure:"machine_type"`
- MemorySize int `mapstructure:"memory"`
- NetDevice string `mapstructure:"net_device"`
- OutputDir string `mapstructure:"output_directory"`
- QemuArgs [][]string `mapstructure:"qemuargs"`
- QemuBinary string `mapstructure:"qemu_binary"`
- ShutdownCommand string `mapstructure:"shutdown_command"`
- SSHHostPortMin int `mapstructure:"ssh_host_port_min"`
+ // Use iso from provided url. Qemu must support
+ // curl block device. This defaults to false.
+ ISOSkipCache bool `mapstructure:"iso_skip_cache" required:"false"`
+ // The accelerator type to use when running the VM.
+ // This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
+ // software must have already been installed on your build machine to use the
+ // accelerator you specified. When no accelerator is specified, Packer will try
+ // to use kvm if it is available but will default to tcg otherwise.
+ Accelerator string `mapstructure:"accelerator" required:"false"`
+ // The number of cpus to use when building the VM.
+ // The default is 1 CPU.
+ CpuCount int `mapstructure:"cpus" required:"false"`
+ // The interface to use for the disk. Allowed
+ // values include any of ide, scsi, virtio or virtio-scsi*. Note
+ // also that any boot commands or kickstart type scripts must have proper
+ // adjustments for resulting device names. The Qemu builder uses virtio by
+ // default.
+ DiskInterface string `mapstructure:"disk_interface" required:"false"`
+ // The size, in megabytes, of the hard disk to create
+ // for the VM. By default, this is 40960 (40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The cache mode to use for disk. Allowed values
+ // include any of writethrough, writeback, none, unsafe
+ // or directsync. By default, this is set to writeback.
+ DiskCache string `mapstructure:"disk_cache" required:"false"`
+ // The discard mode to use for disk. Allowed values
+ // include any of unmap or ignore. By default, this is set to ignore.
+ DiskDiscard string `mapstructure:"disk_discard" required:"false"`
+ // The detect-zeroes mode to use for disk.
+ // Allowed values include any of unmap, on or off. Defaults to off.
+ // When the value is "off" we don't set the flag in the qemu command, so that
+ // Packer still works with old versions of QEMU that don't have this option.
+ DetectZeroes string `mapstructure:"disk_detect_zeroes" required:"false"`
+ // Packer compacts the QCOW2 image using
+ // qemu-img convert. Set this option to true to disable compacting.
+ // Defaults to false.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // Apply compression to the QCOW2 disk file
+ // using qemu-img convert. Defaults to false.
+ DiskCompression bool `mapstructure:"disk_compression" required:"false"`
+ // Either qcow2 or raw, this specifies the output
+ // format of the virtual machine image. This defaults to qcow2.
+ Format string `mapstructure:"format" required:"false"`
+ // Packer defaults to building QEMU virtual machines by
+ // launching a GUI that shows the console of the machine being built. When this
+ // value is set to true, the machine will start without a console.
+ Headless bool `mapstructure:"headless" required:"false"`
+ // Packer defaults to building from an ISO file, this
+ // parameter controls whether the ISO URL supplied is actually a bootable
+ // QEMU image. When this value is set to true, the machine will either clone
+ // the source or use it as a backing file (if use_backing_file is true);
+ // then, it will resize the image according to disk_size and boot it.
+ DiskImage bool `mapstructure:"disk_image" required:"false"`
+ // Only applicable when disk_image is true
+ // and format is qcow2, set this option to true to create a new QCOW2
+ // file that uses the file located at iso_url as a backing file. The new file
+ // will only contain blocks that have changed compared to the backing file, so
+ // enabling this option can significantly reduce disk usage.
+ UseBackingFile bool `mapstructure:"use_backing_file" required:"false"`
+ // The type of machine emulation to use. Run your
+ // qemu binary with the flags -machine help to list available types for
+ // your system. This defaults to pc.
+ MachineType string `mapstructure:"machine_type" required:"false"`
+ // The amount of memory to use when building the VM
+ // in megabytes. This defaults to 512 megabytes.
+ MemorySize int `mapstructure:"memory" required:"false"`
+ // The driver to use for the network interface. Allowed
+ // values ne2k_pci, i82551, i82557b, i82559er, rtl8139, e1000,
+ // pcnet, virtio, virtio-net, virtio-net-pci, usb-net, i82559a,
+ // i82559b, i82559c, i82550, i82562, i82557a, i82557c, i82801,
+ // vmxnet3, i82558a or i82558b. The Qemu builder uses virtio-net by
+ // default.
+ NetDevice string `mapstructure:"net_device" required:"false"`
+ // This is the path to the directory where the
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
+ // Allows complete control over the
+ // qemu command line (though not, at this time, qemu-img). Each array of
+ // strings makes up a command line switch that overrides matching default
+ // switch/value pairs. Any value specified as an empty string is ignored. All
+ // values after the switch are concatenated with no separator.
+ QemuArgs [][]string `mapstructure:"qemuargs" required:"false"`
+ // The name of the Qemu binary to look for. This
+ // defaults to qemu-system-x86_64, but may need to be changed for
+ // some platforms. For example qemu-kvm, or qemu-system-i386 may be a
+ // better choice for some systems.
+ QemuBinary string `mapstructure:"qemu_binary" required:"false"`
+ // The command to use to gracefully shut down the
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine unless a
+ // shutdown command takes place inside script so this may safely be omitted. It
+ // is important to add a shutdown_command. By default Packer halts the virtual
+ // machine and the file system may not be sync'd. Thus, changes made in a
+ // provisioner might not be saved. If one or more scripts require a reboot it is
+ // suggested to leave this blank since reboots may fail and specify the final
+ // shutdown command in your last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // The minimum and
+ // maximum port to use for the SSH port on the host machine which is forwarded
+ // to the SSH port on the guest machine. Because Packer often runs in parallel,
+ // Packer will choose a randomly available port in this range to use as the
+ // host port. By default this is 2222 to 4444.
+ SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
- UseDefaultDisplay bool `mapstructure:"use_default_display"`
- VNCBindAddress string `mapstructure:"vnc_bind_address"`
- VNCPortMin int `mapstructure:"vnc_port_min"`
+ // If true, do not pass a -display option
+ // to qemu, allowing it to choose the default. This may be needed when running
+ // under macOS, and getting errors about sdl not being available.
+ UseDefaultDisplay bool `mapstructure:"use_default_display" required:"false"`
+ // The IP address that should be
+ // bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
+ VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
+ // The minimum and maximum port
+ // to use for VNC access to the virtual machine. The builder uses VNC to type
+ // the initial boot_command. Because Packer generally runs in parallel,
+ // Packer uses a randomly chosen port in this range that appears available. By
+ // default this is 5900 to 6000. The minimum and maximum ports are inclusive.
+ VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
VNCPortMax int `mapstructure:"vnc_port_max"`
- VMName string `mapstructure:"vm_name"`
+ // This is the name of the image (QCOW2 or IMG) file for
+ // the new virtual machine. By default this is packer-BUILDNAME, where
+ // "BUILDNAME" is the name of the build. Currently, no file extension will be
+ // used unless it is specified in this option.
+ VMName string `mapstructure:"vm_name" required:"false"`
// These are deprecated, but we keep them around for BC
// TODO(@mitchellh): remove
@@ -131,8 +230,11 @@ type Config struct {
// TODO(mitchellh): deprecate
RunOnce bool `mapstructure:"run_once"`
-
- RawShutdownTimeout string `mapstructure:"shutdown_timeout"`
+ // The amount of time to wait after executing the
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
shutdownTimeout time.Duration ``
ctx interpolate.Context
diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go
index 45a8217e8..6b8cabdba 100644
--- a/builder/scaleway/config.go
+++ b/builder/scaleway/config.go
@@ -19,19 +19,48 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
-
- Token string `mapstructure:"api_token"`
- Organization string `mapstructure:"organization_id"`
-
- Region string `mapstructure:"region"`
- Image string `mapstructure:"image"`
- CommercialType string `mapstructure:"commercial_type"`
-
- SnapshotName string `mapstructure:"snapshot_name"`
- ImageName string `mapstructure:"image_name"`
- ServerName string `mapstructure:"server_name"`
- Bootscript string `mapstructure:"bootscript"`
- BootType string `mapstructure:"boottype"`
+ // The token to use to authenticate with your account.
+ // It can also be specified via environment variable SCALEWAY_API_TOKEN. You
+ // can see and generate tokens in the "Credentials"
+ // section of the control panel.
+ Token string `mapstructure:"api_token" required:"true"`
+ // The organization id to use to identify your
+ // organization. It can also be specified via environment variable
+ // SCALEWAY_ORGANIZATION. Your organization id is available in the
+ // "Account" section of the
+ // control panel.
+ // Previously named: api_access_key with environment variable: SCALEWAY_API_ACCESS_KEY
+ Organization string `mapstructure:"organization_id" required:"true"`
+ // The name of the region to launch the server in (par1
+ // or ams1). Consequently, this is the region where the snapshot will be
+ // available.
+ Region string `mapstructure:"region" required:"true"`
+ // The UUID of the base image to use. This is the image
+ // that will be used to launch a new server and provision it. See
+ // the images list
+ // get the complete list of the accepted image UUID.
+ Image string `mapstructure:"image" required:"true"`
+ // The name of the server commercial type:
+ // ARM64-128GB, ARM64-16GB, ARM64-2GB, ARM64-32GB, ARM64-4GB,
+ // ARM64-64GB, ARM64-8GB, C1, C2L, C2M, C2S, START1-L,
+ // START1-M, START1-S, START1-XS, X64-120GB, X64-15GB, X64-30GB,
+ // X64-60GB
+ CommercialType string `mapstructure:"commercial_type" required:"true"`
+ // The name of the resulting snapshot that will
+ // appear in your account. Default packer-TIMESTAMP
+ SnapshotName string `mapstructure:"snapshot_name" required:"false"`
+ // The name of the resulting image that will appear in
+ // your account. Default packer-TIMESTAMP
+ ImageName string `mapstructure:"image_name" required:"false"`
+ // The name assigned to the server. Default
+ // packer-UUID
+ ServerName string `mapstructure:"server_name" required:"false"`
+ // The id of an existing bootscript to use when
+ // booting the server.
+ Bootscript string `mapstructure:"bootscript" required:"false"`
+ // The type of boot, can be either local or
+ // bootscript, Default bootscript
+ BootType string `mapstructure:"boottype" required:"false"`
UserAgent string
ctx interpolate.Context
diff --git a/builder/tencentcloud/cvm/access_config.go b/builder/tencentcloud/cvm/access_config.go
index d7bb800bd..6c7c16584 100644
--- a/builder/tencentcloud/cvm/access_config.go
+++ b/builder/tencentcloud/cvm/access_config.go
@@ -44,11 +44,22 @@ var ValidRegions = []Region{
}
type TencentCloudAccessConfig struct {
- SecretId string `mapstructure:"secret_id"`
- SecretKey string `mapstructure:"secret_key"`
- Region string `mapstructure:"region"`
- Zone string `mapstructure:"zone"`
- SkipValidation bool `mapstructure:"skip_region_validation"`
+ // Tencentcloud secret id. You should set it directly,
+ // or set the TENCENTCLOUD_ACCESS_KEY environment variable.
+ SecretId string `mapstructure:"secret_id" required:"true"`
+ // Tencentcloud secret key. You should set it directly,
+ // or set the TENCENTCLOUD_SECRET_KEY environment variable.
+ SecretKey string `mapstructure:"secret_key" required:"true"`
+ // The region where your cvm will be launched. You should
+ // reference Region and Zone
+ // for parameter taking.
+ Region string `mapstructure:"region" required:"true"`
+ // The zone where your cvm will be launched. You should
+ // reference Region and Zone
+ // for parameter taking.
+ Zone string `mapstructure:"zone" required:"true"`
+ // Do not check region and zone when validating.
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
}
func (cf *TencentCloudAccessConfig) Client() (*cvm.Client, *vpc.Client, error) {
diff --git a/builder/tencentcloud/cvm/image_config.go b/builder/tencentcloud/cvm/image_config.go
index c0e9386d8..b6ece44de 100644
--- a/builder/tencentcloud/cvm/image_config.go
+++ b/builder/tencentcloud/cvm/image_config.go
@@ -8,15 +8,29 @@ import (
)
type TencentCloudImageConfig struct {
- ImageName string `mapstructure:"image_name"`
- ImageDescription string `mapstructure:"image_description"`
- Reboot bool `mapstructure:"reboot"`
- ForcePoweroff bool `mapstructure:"force_poweroff"`
- Sysprep bool `mapstructure:"sysprep"`
+ // The name you want to create your customize image,
+ // it should be composed of no more than 20 characters, of letters, numbers
+ // or minus sign.
+ ImageName string `mapstructure:"image_name" required:"true"`
+ // Image description.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
+ // Whether to shut down the cvm to create the image. Default value is
+ // false.
+ Reboot bool `mapstructure:"reboot" required:"false"`
+ // Whether to force power off the cvm when creating the image.
+ // Default value is false.
+ ForcePoweroff bool `mapstructure:"force_poweroff" required:"false"`
+ // Whether to enable Sysprep when creating a Windows image.
+ Sysprep bool `mapstructure:"sysprep" required:"false"`
ImageForceDelete bool `mapstructure:"image_force_delete"`
- ImageCopyRegions []string `mapstructure:"image_copy_regions"`
- ImageShareAccounts []string `mapstructure:"image_share_accounts"`
- SkipValidation bool `mapstructure:"skip_region_validation"`
+ // regions that will be copied to after
+ // your image is created.
+ ImageCopyRegions []string `mapstructure:"image_copy_regions" required:"false"`
+ // accounts that will be shared to
+ // after your image is created.
+ ImageShareAccounts []string `mapstructure:"image_share_accounts" required:"false"`
+ // Do not check region and zone when validating.
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
}
func (cf *TencentCloudImageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/tencentcloud/cvm/run_config.go b/builder/tencentcloud/cvm/run_config.go
index a4f9320d0..ee304e0cb 100644
--- a/builder/tencentcloud/cvm/run_config.go
+++ b/builder/tencentcloud/cvm/run_config.go
@@ -11,26 +11,54 @@ import (
)
type TencentCloudRunConfig struct {
- AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
- SourceImageId string `mapstructure:"source_image_id"`
- InstanceType string `mapstructure:"instance_type"`
- InstanceName string `mapstructure:"instance_name"`
- DiskType string `mapstructure:"disk_type"`
- DiskSize int64 `mapstructure:"disk_size"`
- VpcId string `mapstructure:"vpc_id"`
- VpcName string `mapstructure:"vpc_name"`
+ // Whether allocate public ip to your cvm.
+ // Default value is false.
+ AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
+ // The base image id of Image you want to create
+ // your customized image from.
+ SourceImageId string `mapstructure:"source_image_id" required:"true"`
+ // The instance type your cvm will be launched by.
+ // You should reference Instance Type
+ // for parameter taking.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
+ // Instance name.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // Root disk type your cvm will be launched by. You could
+ // reference Disk Type
+ // for parameter taking.
+ DiskType string `mapstructure:"disk_type" required:"false"`
+ // Root disk size your cvm will be launched by. values range(in GB):
+ DiskSize int64 `mapstructure:"disk_size" required:"false"`
+ // Specify vpc your cvm will be launched by.
+ VpcId string `mapstructure:"vpc_id" required:"false"`
+ // Specify vpc name you will create. if vpc_id is not set, packer will
+ // create a vpc for you named this parameter.
+ VpcName string `mapstructure:"vpc_name" required:"false"`
VpcIp string `mapstructure:"vpc_ip"`
- SubnetId string `mapstructure:"subnet_id"`
- SubnetName string `mapstructure:"subnet_name"`
- CidrBlock string `mapstructure:"cidr_block"` // 10.0.0.0/16(default), 172.16.0.0/12, 192.168.0.0/16
- SubnectCidrBlock string `mapstructure:"subnect_cidr_block"`
+ // Specify subnet your cvm will be launched by.
+ SubnetId string `mapstructure:"subnet_id" required:"false"`
+ // Specify subnet name you will create. if subnet_id is not set, packer will
+ // create a subnet for you named this parameter.
+ SubnetName string `mapstructure:"subnet_name" required:"false"`
+ // Specify the CIDR block of the vpc you will create if vpc_id is not set
+ CidrBlock string `mapstructure:"cidr_block" required:"false"` // 10.0.0.0/16(default), 172.16.0.0/12, 192.168.0.0/16
+ // Specify the CIDR block of the subnet you will create if
+ // subnet_id not set
+ SubnectCidrBlock string `mapstructure:"subnect_cidr_block" required:"false"`
InternetChargeType string `mapstructure:"internet_charge_type"`
- InternetMaxBandwidthOut int64 `mapstructure:"internet_max_bandwidth_out"`
- SecurityGroupId string `mapstructure:"security_group_id"`
- SecurityGroupName string `mapstructure:"security_group_name"`
- UserData string `mapstructure:"user_data"`
- UserDataFile string `mapstructure:"user_data_file"`
- HostName string `mapstructure:"host_name"`
+ // Max bandwidth out your cvm will be launched by(in MB).
+ // values can be set between 1 ~ 100.
+ InternetMaxBandwidthOut int64 `mapstructure:"internet_max_bandwidth_out" required:"false"`
+ // Specify security group your cvm will be launched by.
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ // Specify security name you will create if security_group_id not set.
+ SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
+ // userdata.
+ UserData string `mapstructure:"user_data" required:"false"`
+ // userdata file.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // host name.
+ HostName string `mapstructure:"host_name" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
diff --git a/builder/triton/access_config.go b/builder/triton/access_config.go
index 3fe380e73..27196b81d 100644
--- a/builder/triton/access_config.go
+++ b/builder/triton/access_config.go
@@ -17,12 +17,32 @@ import (
// AccessConfig is for common configuration related to Triton access
type AccessConfig struct {
- Endpoint string `mapstructure:"triton_url"`
- Account string `mapstructure:"triton_account"`
- Username string `mapstructure:"triton_user"`
- KeyID string `mapstructure:"triton_key_id"`
- KeyMaterial string `mapstructure:"triton_key_material"`
- InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify"`
+ // The URL of the Triton cloud API to use. If omitted
+ // it will default to the us-sw-1 region of the Joyent Public cloud. If you
+ // are using your own private Triton installation you will have to supply the
+ // URL of the cloud API of your own Triton installation.
+ Endpoint string `mapstructure:"triton_url" required:"false"`
+ // The username of the Triton account to use when
+ // using the Triton Cloud API.
+ Account string `mapstructure:"triton_account" required:"true"`
+ // The username of a user who has access to your
+ // Triton account.
+ Username string `mapstructure:"triton_user" required:"false"`
+ // The fingerprint of the public key of the SSH key
+ // pair to use for authentication with the Triton Cloud API. If
+ // triton_key_material is not set, it is assumed that the SSH agent has the
+ // private key corresponding to this key ID loaded.
+ KeyID string `mapstructure:"triton_key_id" required:"true"`
+ // Path to the file in which the private key
+ // of triton_key_id is stored. For example /home/soandso/.ssh/id_rsa. If
+ // this is not specified, the SSH agent is used to sign requests with the
+ // triton_key_id specified.
+ KeyMaterial string `mapstructure:"triton_key_material" required:"false"`
+ // This allows skipping TLS verification
+ // of the Triton endpoint. It is useful when connecting to a temporary Triton
+ // installation such as Cloud-On-A-Laptop which does not generally use a
+ // certificate signed by a trusted root CA. The default is false.
+ InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
signer authentication.Signer
}
diff --git a/builder/triton/source_machine_config.go b/builder/triton/source_machine_config.go
index 50c61da2d..5549d0549 100644
--- a/builder/triton/source_machine_config.go
+++ b/builder/triton/source_machine_config.go
@@ -9,14 +9,60 @@ import (
// SourceMachineConfig represents the configuration to run a machine using
// the SDC API in order for provisioning to take place.
type SourceMachineConfig struct {
- MachineName string `mapstructure:"source_machine_name"`
- MachinePackage string `mapstructure:"source_machine_package"`
- MachineImage string `mapstructure:"source_machine_image"`
- MachineNetworks []string `mapstructure:"source_machine_networks"`
- MachineMetadata map[string]string `mapstructure:"source_machine_metadata"`
- MachineTags map[string]string `mapstructure:"source_machine_tags"`
- MachineFirewallEnabled bool `mapstructure:"source_machine_firewall_enabled"`
- MachineImageFilters MachineImageFilter `mapstructure:"source_machine_image_filter"`
+ // Name of the VM used for building the
+ // image. Does not affect (and does not have to be the same) as the name for a
+ // VM instance running this image. Maximum 512 characters but should in
+ // practice be much shorter (think between 5 and 20 characters). For example
+ // mysql-64-server-image-builder. When omitted defaults to
+ // packer-builder-[image_name].
+ MachineName string `mapstructure:"source_machine_name" required:"false"`
+ // The Triton package to use while
+ // building the image. Does not affect (and does not have to be the same) as
+ // the package which will be used for a VM instance running this image. On the
+ // Joyent public cloud this could for example be g3-standard-0.5-smartos.
+ MachinePackage string `mapstructure:"source_machine_package" required:"true"`
+ // The UUID of the image to base the new
+ // image on. Triton supports multiple types of images, called 'brands' in
+ // Triton / Joyent lingo, for containers and VMs. See the chapter Containers
+ // and virtual machines in
+ // the Joyent Triton documentation for detailed information. The following
+ // brands are currently supported by this builder: joyent and kvm. The
+ // choice of base image automatically decides the brand. On the Joyent public
+ // cloud a valid source_machine_image could for example be
+ // 70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit
+ // SmartOS base image (a 'joyent' brand image). source_machine_image_filter
+ // can be used to populate this UUID.
+ MachineImage string `mapstructure:"source_machine_image" required:"true"`
+ // The UUID's of Triton
+ // networks added to the source machine used for creating the image. For
+ // example if any of the provisioners which are run need Internet access you
+ // will need to add the UUID's of the appropriate networks here. If this is
+ // not specified, instances will be placed into the default Triton public and
+ // internal networks.
+ MachineNetworks []string `mapstructure:"source_machine_networks" required:"false"`
+ // Triton metadata
+ // applied to the VM used to create the image. Metadata can be used to pass
+ // configuration information to the VM without the need for networking. See
+ // Using the metadata
+ // API in the
+ // Joyent documentation for more information. This can for example be used to
+ // set the user-script metadata key to have Triton start a user supplied
+ // script after the VM has booted.
+ MachineMetadata map[string]string `mapstructure:"source_machine_metadata" required:"false"`
+ // Tags applied to the
+ // VM used to create the image.
+ MachineTags map[string]string `mapstructure:"source_machine_tags" required:"false"`
+ // Whether or not the firewall
+ // of the VM used to create an image of is enabled. The Triton firewall only
+ // filters inbound traffic to the VM. All outbound traffic is always allowed.
+ // Currently this builder does not provide an interface to add specific
+ // firewall rules. Unless you have a global rule defined in Triton which
+ // allows SSH traffic enabling the firewall will interfere with the SSH
+ // provisioner. The default is false.
+ MachineFirewallEnabled bool `mapstructure:"source_machine_firewall_enabled" required:"false"`
+ // Filters used to populate the
+ // source_machine_image field. Example:
+ MachineImageFilters MachineImageFilter `mapstructure:"source_machine_image_filter" required:"false"`
}
type MachineImageFilter struct {
diff --git a/builder/triton/target_image_config.go b/builder/triton/target_image_config.go
index 53a9f58f7..183ae1932 100644
--- a/builder/triton/target_image_config.go
+++ b/builder/triton/target_image_config.go
@@ -9,13 +9,31 @@ import (
// TargetImageConfig represents the configuration for the image to be created
// from the source machine.
type TargetImageConfig struct {
- ImageName string `mapstructure:"image_name"`
- ImageVersion string `mapstructure:"image_version"`
- ImageDescription string `mapstructure:"image_description"`
- ImageHomepage string `mapstructure:"image_homepage"`
- ImageEULA string `mapstructure:"image_eula_url"`
- ImageACL []string `mapstructure:"image_acls"`
- ImageTags map[string]string `mapstructure:"image_tags"`
+ // The name the finished image in Triton will be
+ // assigned. Maximum 512 characters but should in practice be much shorter
+ // (think between 5 and 20 characters). For example postgresql-95-server for
+ // an image used as a PostgreSQL 9.5 server.
+ ImageName string `mapstructure:"image_name" required:"true"`
+ // The version string for this image. Maximum 128
+ // characters. Any string will do but a format of Major.Minor.Patch is
+ // strongly advised by Joyent. See Semantic Versioning
+ // for more information on the Major.Minor.Patch versioning format.
+ ImageVersion string `mapstructure:"image_version" required:"true"`
+ // Description of the image. Maximum 512
+ // characters.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
+ // URL of the homepage where users can find
+ // information about the image. Maximum 128 characters.
+ ImageHomepage string `mapstructure:"image_homepage" required:"false"`
+ // URL of the End User License Agreement (EULA)
+ // for the image. Maximum 128 characters.
+ ImageEULA string `mapstructure:"image_eula_url" required:"false"`
+ // The UUID's of the users which will have
+ // access to this image. When omitted only the owner (the Triton user whose
+ // credentials are used) will have access to the image.
+ ImageACL []string `mapstructure:"image_acls" required:"false"`
+ // Tag applied to the image.
+ ImageTags map[string]string `mapstructure:"image_tags" required:"false"`
}
// Prepare performs basic validation on a TargetImageConfig struct.
diff --git a/builder/vagrant/builder.go b/builder/vagrant/builder.go
index 06b22cbcd..ccd571777 100644
--- a/builder/vagrant/builder.go
+++ b/builder/vagrant/builder.go
@@ -36,39 +36,94 @@ type Config struct {
common.FloppyConfig `mapstructure:",squash"`
bootcommand.BootConfig `mapstructure:",squash"`
SSHConfig `mapstructure:",squash"`
-
- // This is the name of the new virtual machine.
- // By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
- OutputDir string `mapstructure:"output_dir"`
- SourceBox string `mapstructure:"source_path"`
- GlobalID string `mapstructure:"global_id"`
- Checksum string `mapstructure:"checksum"`
- ChecksumType string `mapstructure:"checksum_type"`
- BoxName string `mapstructure:"box_name"`
-
- Provider string `mapstructure:"provider"`
+ // The directory to create that will contain
+ // your output box. We always create this directory and run from inside of it to
+ // prevent Vagrant init collisions. If unset, it will be set to packer- plus
+ // your buildname.
+ OutputDir string `mapstructure:"output_dir" required:"false"`
+ // URL of the vagrant box to use, or the name of the
+ // vagrant box. hashicorp/precise64, ./mylocalbox.box and
+ // https://example.com/my-box.box are all valid source boxes. If your
+ // source is a .box file, whether locally or from a URL like the latter example
+ // above, you will also need to provide a box_name. This option is required,
+ // unless you set global_id. You may only set one or the other, not both.
+ SourceBox string `mapstructure:"source_path" required:"true"`
+ // the global id of a Vagrant box already added to Vagrant
+ // on your system. You can find the global id of your Vagrant boxes using the
+ // command vagrant global-status; your global_id will be a 7-digit number and
+ // letter combination that you'll find in the leftmost column of the
+ // global-status output. If you choose to use global_id instead of
+ // source_box, Packer will skip the Vagrant initialize and add steps, and
+ // simply launch the box directly using the global id.
+ GlobalID string `mapstructure:"global_id" required:"true"`
+ // The checksum for the .box file. The type of the
+ // checksum is specified with checksum_type, documented below.
+ Checksum string `mapstructure:"checksum" required:"false"`
+ // The type of the checksum specified in checksum.
+ // Valid values are none, md5, sha1, sha256, or sha512. Although the
+ // checksum will not be verified when checksum_type is set to "none", this is
+ // not recommended since OVA files can be very large and corruption does happen
+ // from time to time.
+ ChecksumType string `mapstructure:"checksum_type" required:"false"`
+ // if your source_box is a boxfile that we need to add
+ // to Vagrant, this is the name to give it. If left blank, will default to
+ // "packer_" plus your buildname.
+ BoxName string `mapstructure:"box_name" required:"false"`
+ // The vagrant provider.
+ // This parameter is required when source_path has more than one provider,
+ // or when using vagrant-cloud post-processor. Defaults to unset.
+ Provider string `mapstructure:"provider" required:"false"`
Communicator string `mapstructure:"communicator"`
- // Whether to Halt, Suspend, or Destroy the box
- TeardownMethod string `mapstructure:"teardown_method"`
-
// Options for the "vagrant init" command
- BoxVersion string `mapstructure:"box_version"`
- Template string `mapstructure:"template"`
+
+ // What vagrantfile to use
+ VagrantfileTpl string `mapstructure:"vagrantfile_template"`
+ // Whether to halt, suspend, or destroy the box when
+ // the build has completed. Defaults to "halt"
+ TeardownMethod string `mapstructure:"teardown_method" required:"false"`
+ // What box version to use when initializing Vagrant.
+ BoxVersion string `mapstructure:"box_version" required:"false"`
+ // a path to a golang template for a
+ // vagrantfile. Our default template can be found
+ // here. So far the only template variables available to you are {{ .BoxName }} and
+ // {{ .SyncedFolder }}, which correspond to the Packer options box_name and
+ // synced_folder.
+ Template string `mapstructure:"template" required:"false"`
+
SyncedFolder string `mapstructure:"synced_folder"`
-
- // Options for the "vagrant box add" command
- SkipAdd bool `mapstructure:"skip_add"`
- AddCACert string `mapstructure:"add_cacert"`
- AddCAPath string `mapstructure:"add_capath"`
- AddCert string `mapstructure:"add_cert"`
- AddClean bool `mapstructure:"add_clean"`
- AddForce bool `mapstructure:"add_force"`
- AddInsecure bool `mapstructure:"add_insecure"`
-
- // Don't package the Vagrant box after build.
- SkipPackage bool `mapstructure:"skip_package"`
+ // Don't call "vagrant add" to add the box to your local
+ // environment; this is necessary if you want to launch a box that is already
+ // added to your vagrant environment.
+ SkipAdd bool `mapstructure:"skip_add" required:"false"`
+ // Equivalent to setting the
+ // --cacert
+ // option in vagrant add; defaults to unset.
+ AddCACert string `mapstructure:"add_cacert" required:"false"`
+ // Equivalent to setting the
+ // --capath option
+ // in vagrant add; defaults to unset.
+ AddCAPath string `mapstructure:"add_capath" required:"false"`
+ // Equivalent to setting the
+ // --cert option in
+ // vagrant add; defaults to unset.
+ AddCert string `mapstructure:"add_cert" required:"false"`
+ // Equivalent to setting the
+ // --clean flag in
+ // vagrant add; defaults to unset.
+ AddClean bool `mapstructure:"add_clean" required:"false"`
+ // Equivalent to setting the
+ // --force flag in
+ // vagrant add; defaults to unset.
+ AddForce bool `mapstructure:"add_force" required:"false"`
+ // Equivalent to setting the
+ // --insecure flag in
+ // vagrant add; defaults to unset.
+ AddInsecure bool `mapstructure:"add_insecure" required:"false"`
+ // if true, Packer will not call vagrant package to
+ // package your base box into its own standalone .box file.
+ SkipPackage bool `mapstructure:"skip_package" required:"false"`
OutputVagrantfile string `mapstructure:"output_vagrantfile"`
PackageInclude []string `mapstructure:"package_include"`
diff --git a/builder/virtualbox/common/export_config.go b/builder/virtualbox/common/export_config.go
index b55941943..a5af812b6 100644
--- a/builder/virtualbox/common/export_config.go
+++ b/builder/virtualbox/common/export_config.go
@@ -7,7 +7,9 @@ import (
)
type ExportConfig struct {
- Format string `mapstructure:"format"`
+ // Either ovf or ova, this specifies the output format
+ // of the exported virtual machine. This defaults to ovf.
+ Format string `mapstructure:"format" required:"false"`
}
func (c *ExportConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/export_opts.go b/builder/virtualbox/common/export_opts.go
index de94a7caa..ad40b1d3c 100644
--- a/builder/virtualbox/common/export_opts.go
+++ b/builder/virtualbox/common/export_opts.go
@@ -5,7 +5,12 @@ import (
)
type ExportOpts struct {
- ExportOpts []string `mapstructure:"export_opts"`
+ // Additional options to pass to the
+ // VBoxManage
+ // export. This
+ // can be useful for passing product information to include in the resulting
+ // appliance file. Packer JSON configuration file example:
+ ExportOpts []string `mapstructure:"export_opts" required:"false"`
}
func (c *ExportOpts) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/guest_additions_config.go b/builder/virtualbox/common/guest_additions_config.go
index 22d16b716..eb2b5e278 100644
--- a/builder/virtualbox/common/guest_additions_config.go
+++ b/builder/virtualbox/common/guest_additions_config.go
@@ -16,7 +16,14 @@ const (
type GuestAdditionsConfig struct {
Communicator string `mapstructure:"communicator"`
- GuestAdditionsMode string `mapstructure:"guest_additions_mode"`
+ // The method by which guest additions are
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
}
func (c *GuestAdditionsConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/hw_config.go b/builder/virtualbox/common/hw_config.go
index 9e7079237..ec63b1a46 100644
--- a/builder/virtualbox/common/hw_config.go
+++ b/builder/virtualbox/common/hw_config.go
@@ -7,14 +7,19 @@ import (
)
type HWConfig struct {
-
- // cpu information
- CpuCount int `mapstructure:"cpus"`
- MemorySize int `mapstructure:"memory"`
-
- // device presence
- Sound string `mapstructure:"sound"`
- USB bool `mapstructure:"usb"`
+ // The number of cpus to use for building the VM.
+ // Defaults to 1.
+ CpuCount int `mapstructure:"cpus" required:"false"`
+ // The amount of memory to use for building the VM
+ // in megabytes. Defaults to 512 megabytes.
+ MemorySize int `mapstructure:"memory" required:"false"`
+ // Defaults to none. The type of audio device to use for
+ // sound when building the VM. Some of the options that are available are
+ // dsound, oss, alsa, pulse, coreaudio, null.
+ Sound string `mapstructure:"sound" required:"false"`
+ // Specifies whether or not to enable the USB bus when
+ // building the VM. Defaults to false.
+ USB bool `mapstructure:"usb" required:"false"`
}
func (c *HWConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/output_config.go b/builder/virtualbox/common/output_config.go
index 14eced008..263d0b8e6 100644
--- a/builder/virtualbox/common/output_config.go
+++ b/builder/virtualbox/common/output_config.go
@@ -8,7 +8,13 @@ import (
)
type OutputConfig struct {
- OutputDir string `mapstructure:"output_directory"`
+ // This is the path to the directory where the
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
}
func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error {
diff --git a/builder/virtualbox/common/run_config.go b/builder/virtualbox/common/run_config.go
index e5cae02a6..b460ca45d 100644
--- a/builder/virtualbox/common/run_config.go
+++ b/builder/virtualbox/common/run_config.go
@@ -7,10 +7,20 @@ import (
)
type RunConfig struct {
- Headless bool `mapstructure:"headless"`
-
- VRDPBindAddress string `mapstructure:"vrdp_bind_address"`
- VRDPPortMin int `mapstructure:"vrdp_port_min"`
+ // Packer defaults to building VirtualBox virtual
+ // machines by launching a GUI that shows the console of the machine
+ // being built. When this value is set to true, the machine will start
+ // without a console.
+ Headless bool `mapstructure:"headless" required:"false"`
+ // The IP address that should be
+ // bound to for VRDP. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
+ VRDPBindAddress string `mapstructure:"vrdp_bind_address" required:"false"`
+ // The minimum and maximum port
+ // to use for VRDP access to the virtual machine. Packer uses a randomly chosen
+ // port in this range that appears available. By default this is 5900 to
+ // 6000. The minimum and maximum ports are inclusive.
+ VRDPPortMin int `mapstructure:"vrdp_port_min" required:"false"`
VRDPPortMax int `mapstructure:"vrdp_port_max"`
}
diff --git a/builder/virtualbox/common/shutdown_config.go b/builder/virtualbox/common/shutdown_config.go
index 05180ef09..e5d7acf9d 100644
--- a/builder/virtualbox/common/shutdown_config.go
+++ b/builder/virtualbox/common/shutdown_config.go
@@ -8,9 +8,24 @@ import (
)
type ShutdownConfig struct {
- ShutdownCommand string `mapstructure:"shutdown_command"`
- RawShutdownTimeout string `mapstructure:"shutdown_timeout"`
- RawPostShutdownDelay string `mapstructure:"post_shutdown_delay"`
+ // The command to use to gracefully shut down the
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine unless a
+ // shutdown command takes place inside script so this may safely be omitted. If
+ // one or more scripts require a reboot it is suggested to leave this blank
+ // since reboots may fail and specify the final shutdown command in your
+ // last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // The amount of time to wait after executing the
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
+ // The amount of time to wait after shutting
+ // down the virtual machine. If you get the error
+ // Error removing floppy controller, you might need to set this to 5m
+ // or so. By default, the delay is 0s or disabled.
+ RawPostShutdownDelay string `mapstructure:"post_shutdown_delay" required:"false"`
ShutdownTimeout time.Duration ``
PostShutdownDelay time.Duration ``
diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go
index 10159cc86..57bce7353 100644
--- a/builder/virtualbox/common/ssh_config.go
+++ b/builder/virtualbox/common/ssh_config.go
@@ -10,10 +10,17 @@ import (
type SSHConfig struct {
Comm communicator.Config `mapstructure:",squash"`
-
- SSHHostPortMin int `mapstructure:"ssh_host_port_min"`
+ // The minimum and
+ // maximum port to use for the SSH port on the host machine which is forwarded
+ // to the SSH port on the guest machine. Because Packer often runs in parallel,
+ // Packer will choose a randomly available port in this range to use as the
+ // host port. By default this is 2222 to 4444.
+ SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
- SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping"`
+ // Defaults to false. When enabled, Packer
+ // does not setup forwarded port mapping for SSH requests and uses ssh_port
+ // on the host to communicate to the virtual machine.
+ SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping" required:"false"`
// These are deprecated, but we keep them around for BC
// TODO(@mitchellh): remove
diff --git a/builder/virtualbox/common/vbox_version_config.go b/builder/virtualbox/common/vbox_version_config.go
index 96a62c8a2..4aa200e6d 100644
--- a/builder/virtualbox/common/vbox_version_config.go
+++ b/builder/virtualbox/common/vbox_version_config.go
@@ -8,7 +8,13 @@ import (
type VBoxVersionConfig struct {
Communicator string `mapstructure:"communicator"`
- VBoxVersionFile *string `mapstructure:"virtualbox_version_file"`
+ // The path within the virtual machine to
+ // upload a file that contains the VirtualBox version that was used to create
+ // the machine. This information can be useful for provisioning. By default
+	// this is .vbox_version, which will generally be uploaded into the
+ // home directory. Set to an empty string to skip uploading this file, which
+ // can be useful when using the none communicator.
+ VBoxVersionFile *string `mapstructure:"virtualbox_version_file" required:"false"`
}
func (c *VBoxVersionConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/vboxbundle_config.go b/builder/virtualbox/common/vboxbundle_config.go
index 944646aca..7e863d529 100644
--- a/builder/virtualbox/common/vboxbundle_config.go
+++ b/builder/virtualbox/common/vboxbundle_config.go
@@ -5,7 +5,11 @@ import (
)
type VBoxBundleConfig struct {
- BundleISO bool `mapstructure:"bundle_iso"`
+ // Defaults to false. When enabled, Packer includes
+ // any attached ISO disc devices into the final virtual machine. Useful for
+ // some live distributions that require installation media to continue to be
+ // attached after installation.
+ BundleISO bool `mapstructure:"bundle_iso" required:"false"`
}
func (c *VBoxBundleConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/vboxmanage_config.go b/builder/virtualbox/common/vboxmanage_config.go
index 7ee0a19ce..48c1dd6ca 100644
--- a/builder/virtualbox/common/vboxmanage_config.go
+++ b/builder/virtualbox/common/vboxmanage_config.go
@@ -5,7 +5,17 @@ import (
)
type VBoxManageConfig struct {
- VBoxManage [][]string `mapstructure:"vboxmanage"`
+ // Custom VBoxManage commands to
+ // execute in order to further customize the virtual machine being created. The
+ // value of this is an array of commands to execute. The commands are executed
+ // in the order defined in the template. For each command, the command is
+ // defined itself as an array of strings, where each string represents a single
+ // argument on the command-line to VBoxManage (but excluding
+ // VBoxManage itself). Each arg is treated as a configuration
+ // template, where the Name
+ // variable is replaced with the VM name. More details on how to use
+ // VBoxManage are below.
+ VBoxManage [][]string `mapstructure:"vboxmanage" required:"false"`
}
func (c *VBoxManageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/vboxmanage_post_config.go b/builder/virtualbox/common/vboxmanage_post_config.go
index d05913f9f..df923fe38 100644
--- a/builder/virtualbox/common/vboxmanage_post_config.go
+++ b/builder/virtualbox/common/vboxmanage_post_config.go
@@ -5,7 +5,10 @@ import (
)
type VBoxManagePostConfig struct {
- VBoxManagePost [][]string `mapstructure:"vboxmanage_post"`
+ // Identical to vboxmanage,
+ // except that it is run after the virtual machine is shutdown, and before the
+ // virtual machine is exported.
+ VBoxManagePost [][]string `mapstructure:"vboxmanage_post" required:"false"`
}
func (c *VBoxManagePostConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go
index 5a94a216e..87ba4322a 100644
--- a/builder/virtualbox/iso/builder.go
+++ b/builder/virtualbox/iso/builder.go
@@ -41,22 +41,81 @@ type Config struct {
vboxcommon.VBoxVersionConfig `mapstructure:",squash"`
vboxcommon.VBoxBundleConfig `mapstructure:",squash"`
vboxcommon.GuestAdditionsConfig `mapstructure:",squash"`
-
- DiskSize uint `mapstructure:"disk_size"`
- GuestAdditionsMode string `mapstructure:"guest_additions_mode"`
- GuestAdditionsPath string `mapstructure:"guest_additions_path"`
- GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256"`
- GuestAdditionsURL string `mapstructure:"guest_additions_url"`
- GuestAdditionsInterface string `mapstructure:"guest_additions_interface"`
- GuestOSType string `mapstructure:"guest_os_type"`
- HardDriveDiscard bool `mapstructure:"hard_drive_discard"`
- HardDriveInterface string `mapstructure:"hard_drive_interface"`
- SATAPortCount int `mapstructure:"sata_port_count"`
- HardDriveNonrotational bool `mapstructure:"hard_drive_nonrotational"`
- ISOInterface string `mapstructure:"iso_interface"`
- KeepRegistered bool `mapstructure:"keep_registered"`
- SkipExport bool `mapstructure:"skip_export"`
- VMName string `mapstructure:"vm_name"`
+ // The size, in megabytes, of the hard disk to create
+ // for the VM. By default, this is 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The method by which guest additions are
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // The path on the guest virtual machine
+ // where the VirtualBox guest additions ISO will be uploaded. By default this
+	// is VBoxGuestAdditions.iso which should be uploaded into the login directory of
+ // the user. This is a configuration
+ // template where the Version
+ // variable is replaced with the VirtualBox version.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
+ // The SHA256 checksum of the guest
+ // additions ISO that will be uploaded to the guest VM. By default the
+ // checksums will be downloaded from the VirtualBox website, so this only needs
+ // to be set if you want to be explicit about the checksum.
+ GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
+ // The URL to the guest additions ISO
+ // to upload. This can also be a file URL if the ISO is at a local path. By
+ // default, the VirtualBox builder will attempt to find the guest additions ISO
+ // on the local file system. If it is not available locally, the builder will
+ // download the proper guest additions ISO from the internet.
+ GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
+ // The interface type to use to mount
+ // guest additions when guest_additions_mode is set to attach. Will
+ // default to the value set in iso_interface, if iso_interface is set.
+ // Will default to "ide", if iso_interface is not set. Options are "ide" and
+ // "sata".
+ GuestAdditionsInterface string `mapstructure:"guest_additions_interface" required:"false"`
+ // The guest OS type being installed. By default
+ // this is other, but you can get dramatic performance improvements by
+ // setting this to the proper value. To view all available values for this run
+ // VBoxManage list ostypes. Setting the correct value hints to VirtualBox how
+ // to optimize the virtual hardware to work best with that operating system.
+ GuestOSType string `mapstructure:"guest_os_type" required:"false"`
+ // When this value is set to true, a VDI
+ // image will be shrunk in response to the trim command from the guest OS.
+ // The size of the cleared area must be at least 1MB. Also set
+ // hard_drive_nonrotational to true to enable TRIM support.
+ HardDriveDiscard bool `mapstructure:"hard_drive_discard" required:"false"`
+ // The type of controller that the primary
+ // hard drive is attached to, defaults to ide. When set to sata, the drive
+ // is attached to an AHCI SATA controller. When set to scsi, the drive is
+ // attached to an LsiLogic SCSI controller.
+ HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
+ // The number of ports available on any SATA
+ // controller created, defaults to 1. VirtualBox supports up to 30 ports on a
+ // maximum of 1 SATA controller. Increasing this value can be useful if you
+ // want to attach additional drives.
+ SATAPortCount int `mapstructure:"sata_port_count" required:"false"`
+ // Forces some guests (i.e. Windows 7+)
+ // to treat disks as SSDs and stops them from performing disk fragmentation.
+ // Also set hard_drive_discard to true to enable TRIM support.
+ HardDriveNonrotational bool `mapstructure:"hard_drive_nonrotational" required:"false"`
+ // The type of controller that the ISO is attached
+ // to, defaults to ide. When set to sata, the drive is attached to an AHCI
+ // SATA controller.
+ ISOInterface string `mapstructure:"iso_interface" required:"false"`
+ // Set this to true if you would like to keep
+ // the VM registered with virtualbox. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // Defaults to false. When enabled, Packer will
+ // not export the VM. Useful if the build output is not the resultant image,
+ // but created inside the VM.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // This is the name of the OVF file for the new virtual
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go
index dd22766aa..50c3819d4 100644
--- a/builder/virtualbox/ovf/config.go
+++ b/builder/virtualbox/ovf/config.go
@@ -29,21 +29,77 @@ type Config struct {
vboxcommon.VBoxManagePostConfig `mapstructure:",squash"`
vboxcommon.VBoxVersionConfig `mapstructure:",squash"`
vboxcommon.GuestAdditionsConfig `mapstructure:",squash"`
-
- Checksum string `mapstructure:"checksum"`
- ChecksumType string `mapstructure:"checksum_type"`
- GuestAdditionsMode string `mapstructure:"guest_additions_mode"`
- GuestAdditionsPath string `mapstructure:"guest_additions_path"`
- GuestAdditionsInterface string `mapstructure:"guest_additions_interface"`
- GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256"`
- GuestAdditionsURL string `mapstructure:"guest_additions_url"`
- ImportFlags []string `mapstructure:"import_flags"`
- ImportOpts string `mapstructure:"import_opts"`
- SourcePath string `mapstructure:"source_path"`
- TargetPath string `mapstructure:"target_path"`
- VMName string `mapstructure:"vm_name"`
- KeepRegistered bool `mapstructure:"keep_registered"`
- SkipExport bool `mapstructure:"skip_export"`
+ // The checksum for the source_path file. The
+ // algorithm to use when computing the checksum can be optionally specified
+ // with checksum_type. When checksum_type is not set packer will guess the
+ // checksumming type based on checksum length. checksum can be also be a
+ // file or an URL, in which case checksum_type must be set to file; the
+ // go-getter will download it and use the first hash found.
+ Checksum string `mapstructure:"checksum" required:"true"`
+ // The type of the checksum specified in checksum.
+ // Valid values are none, md5, sha1, sha256, or sha512. Although the
+ // checksum will not be verified when checksum_type is set to "none", this is
+ // not recommended since OVA files can be very large and corruption does happen
+ // from time to time.
+ ChecksumType string `mapstructure:"checksum_type" required:"false"`
+ // The method by which guest additions are
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // The path on the guest virtual machine
+ // where the VirtualBox guest additions ISO will be uploaded. By default this
+	// is VBoxGuestAdditions.iso which should be uploaded into the login directory of
+ // the user. This is a configuration
+ // template where the Version
+ // variable is replaced with the VirtualBox version.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
+ // The interface type to use to mount
+ // guest additions when guest_additions_mode is set to attach. Will
+ // default to the value set in iso_interface, if iso_interface is set.
+ // Will default to "ide", if iso_interface is not set. Options are "ide" and
+ // "sata".
+ GuestAdditionsInterface string `mapstructure:"guest_additions_interface" required:"false"`
+ // The SHA256 checksum of the guest
+ // additions ISO that will be uploaded to the guest VM. By default the
+ // checksums will be downloaded from the VirtualBox website, so this only needs
+ // to be set if you want to be explicit about the checksum.
+ GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
+ // The URL to the guest additions ISO
+ // to upload. This can also be a file URL if the ISO is at a local path. By
+ // default, the VirtualBox builder will attempt to find the guest additions ISO
+ // on the local file system. If it is not available locally, the builder will
+ // download the proper guest additions ISO from the internet.
+ GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
+ // Additional flags to pass to
+ // VBoxManage import. This can be used to add additional command-line flags
+ // such as --eula-accept to accept a EULA in the OVF.
+ ImportFlags []string `mapstructure:"import_flags" required:"false"`
+ // Additional options to pass to the
+ // VBoxManage import. This can be useful for passing keepallmacs or
+ // keepnatmacs options for existing ovf images.
+ ImportOpts string `mapstructure:"import_opts" required:"false"`
+ // The path to an OVF or OVA file that acts as the
+ // source of this build. This currently must be a local file.
+ SourcePath string `mapstructure:"source_path" required:"true"`
+ // The path where the OVA should be saved
+ // after download. By default, it will go in the packer cache, with a hash of
+ // the original filename as its name.
+ TargetPath string `mapstructure:"target_path" required:"false"`
+ // This is the name of the OVF file for the new virtual
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
+ // Set this to true if you would like to keep
+ // the VM registered with virtualbox. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // Defaults to false. When enabled, Packer will
+ // not export the VM. Useful if the build output is not the resultant image,
+ // but created inside the VM.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/vmware/common/driver_config.go b/builder/vmware/common/driver_config.go
index 1add0bb10..f363aed33 100644
--- a/builder/vmware/common/driver_config.go
+++ b/builder/vmware/common/driver_config.go
@@ -14,17 +14,42 @@ import (
)
type DriverConfig struct {
- FusionAppPath string `mapstructure:"fusion_app_path"`
- RemoteType string `mapstructure:"remote_type"`
- RemoteDatastore string `mapstructure:"remote_datastore"`
- RemoteCacheDatastore string `mapstructure:"remote_cache_datastore"`
- RemoteCacheDirectory string `mapstructure:"remote_cache_directory"`
- RemoteHost string `mapstructure:"remote_host"`
- RemotePort int `mapstructure:"remote_port"`
- RemoteUser string `mapstructure:"remote_username"`
- RemotePassword string `mapstructure:"remote_password"`
- RemotePrivateKey string `mapstructure:"remote_private_key_file"`
- SkipValidateCredentials bool `mapstructure:"skip_validate_credentials"`
+ // Path to "VMware Fusion.app". By default this is
+ // /Applications/VMware Fusion.app but this setting allows you to
+ // customize this.
+ FusionAppPath string `mapstructure:"fusion_app_path" required:"false"`
+ // The type of remote machine that will be used to
+ // build this VM rather than a local desktop product. The only value accepted
+ // for this currently is esx5. If this is not set, a desktop product will
+ // be used. By default, this is not set.
+ RemoteType string `mapstructure:"remote_type" required:"false"`
+ // The path to the datastore where the VM will be stored
+ // on the ESXi machine.
+ RemoteDatastore string `mapstructure:"remote_datastore" required:"false"`
+ // The path to the datastore where supporting files
+ // will be stored during the build on the remote machine.
+ RemoteCacheDatastore string `mapstructure:"remote_cache_datastore" required:"false"`
+ // The path where the ISO and/or floppy files will
+ // be stored during the build on the remote machine. The path is relative to
+ // the remote_cache_datastore on the remote machine.
+ RemoteCacheDirectory string `mapstructure:"remote_cache_directory" required:"false"`
+ // The host of the remote machine used for access.
+ // This is only required if remote_type is enabled.
+ RemoteHost string `mapstructure:"remote_host" required:"false"`
+ // The SSH port of the remote machine
+ RemotePort int `mapstructure:"remote_port" required:"false"`
+ // The SSH username used to access the remote machine.
+ RemoteUser string `mapstructure:"remote_username" required:"false"`
+ // The SSH password for access to the remote machine.
+ RemotePassword string `mapstructure:"remote_password" required:"false"`
+ // The SSH key for access to the remote machine.
+ RemotePrivateKey string `mapstructure:"remote_private_key_file" required:"false"`
+ // When Packer is preparing to run a
+	// remote esxi build, and export is not disabled, by default it runs a no-op
+ // ovftool command to make sure that the remote_username and remote_password
+ // given are valid. If you set this flag to true, Packer will skip this
+ // validation. Default: false.
+ SkipValidateCredentials bool `mapstructure:"skip_validate_credentials" required:"false"`
}
func (c *DriverConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/export_config.go b/builder/vmware/common/export_config.go
index 79c4eaeea..a09c772ec 100644
--- a/builder/vmware/common/export_config.go
+++ b/builder/vmware/common/export_config.go
@@ -7,11 +7,45 @@ import (
)
type ExportConfig struct {
- Format string `mapstructure:"format"`
- OVFToolOptions []string `mapstructure:"ovftool_options"`
- SkipExport bool `mapstructure:"skip_export"`
- KeepRegistered bool `mapstructure:"keep_registered"`
- SkipCompaction bool `mapstructure:"skip_compaction"`
+ // Either "ovf", "ova" or "vmx", this specifies the output
+ // format of the exported virtual machine. This defaults to "ovf".
+ // Before using this option, you need to install ovftool. This option
+ // currently only works when option remote_type is set to "esx5".
+ // Since ovftool is only capable of password based authentication
+ // remote_password must be set when exporting the VM.
+ Format string `mapstructure:"format" required:"false"`
+ // Extra options to pass to ovftool
+ // during export. Each item in the array is a new argument. The options
+ // --noSSLVerify, --skipManifestCheck, and --targetType are reserved,
+ // and should not be passed to this argument.
+ // Currently, exporting the build VM (with ovftool) is only supported when
+ // building on ESXi e.g. when remote_type is set to esx5. See the
+ // Building on a Remote vSphere
+ // Hypervisor
+ // section below for more info.
+ OVFToolOptions []string `mapstructure:"ovftool_options" required:"false"`
+ // Defaults to false. When enabled, Packer will
+ // not export the VM. Useful if the build output is not the resultant
+ // image, but created inside the VM.
+ // Currently, exporting the build VM is only supported when building on
+ // ESXi e.g. when remote_type is set to esx5. See the Building on a
+ // Remote vSphere
+ // Hypervisor
+ // section below for more info.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // Set this to true if you would like to keep
+ // the VM registered with the remote ESXi server. If you do not need to export
+ // the vm, then also set skip_export: true in order to avoid an unnecessary
+ // step of using ovftool to export the vm. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // VMware-created disks are defragmented and
+ // compacted at the end of the build process using vmware-vdiskmanager or
+ // vmkfstools in ESXi. In certain rare cases, this might actually end up
+ // making the resulting disks slightly larger. If you find this to be the case,
+ // you can disable compaction using this configuration value. Defaults to
+	// false. Defaults to true for ESXi when disk_type_id is not explicitly
+ // defined and false otherwise.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
}
func (c *ExportConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/hw_config.go b/builder/vmware/common/hw_config.go
index 588d295f1..ad72d5465 100644
--- a/builder/vmware/common/hw_config.go
+++ b/builder/vmware/common/hw_config.go
@@ -10,23 +10,42 @@ import (
)
type HWConfig struct {
-
- // cpu information
- CpuCount int `mapstructure:"cpus"`
- MemorySize int `mapstructure:"memory"`
- CoreCount int `mapstructure:"cores"`
-
- // network type and adapter
- Network string `mapstructure:"network"`
- NetworkAdapterType string `mapstructure:"network_adapter_type"`
-
- // device presence
- Sound bool `mapstructure:"sound"`
- USB bool `mapstructure:"usb"`
-
- // communication ports
- Serial string `mapstructure:"serial"`
- Parallel string `mapstructure:"parallel"`
+ // The number of cpus to use when building the VM.
+ CpuCount int `mapstructure:"cpus" required:"false"`
+ // The amount of memory to use when building the VM
+ // in megabytes.
+ MemorySize int `mapstructure:"memory" required:"false"`
+ // The number of cores per socket to use when building the VM.
+ // This corresponds to the cpuid.coresPerSocket option in the .vmx file.
+ CoreCount int `mapstructure:"cores" required:"false"`
+ // This is the network type that the virtual machine will
+ // be created with. This can be one of the generic values that map to a device
+ // such as hostonly, nat, or bridged. If the network is not one of these
+ // values, then it is assumed to be a VMware network device. (VMnet0..x)
+ Network string `mapstructure:"network" required:"false"`
+	// This is the ethernet adapter type that the
+ // virtual machine will be created with. By default the e1000 network adapter
+ // type will be used by Packer. For more information, please consult the
+ //
+ // Choosing a network adapter for your virtual machine for desktop VMware
+ // clients. For ESXi, refer to the proper ESXi documentation.
+ NetworkAdapterType string `mapstructure:"network_adapter_type" required:"false"`
+ // Specify whether to enable VMware's virtual soundcard
+ // device when building the VM. Defaults to false.
+ Sound bool `mapstructure:"sound" required:"false"`
+ // Enable VMware's USB bus when building the guest VM.
+ // Defaults to false. To enable usage of the XHCI bus for USB 3 (5 Gbit/s),
+ // one can use the vmx_data option to enable it by specifying true for
+ // the usb_xhci.present property.
+ USB bool `mapstructure:"usb" required:"false"`
+ // This specifies a serial port to add to the VM.
+ // It has a format of Type:option1,option2,.... The field Type can be one
+ // of the following values: FILE, DEVICE, PIPE, AUTO, or NONE.
+ Serial string `mapstructure:"serial" required:"false"`
+ // This specifies a parallel port to add to the VM. It
+ // has the format of Type:option1,option2,.... Type can be one of the
+ // following values: FILE, DEVICE, AUTO, or NONE.
+ Parallel string `mapstructure:"parallel" required:"false"`
}
func (c *HWConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/output_config.go b/builder/vmware/common/output_config.go
index 14eced008..263d0b8e6 100644
--- a/builder/vmware/common/output_config.go
+++ b/builder/vmware/common/output_config.go
@@ -8,7 +8,13 @@ import (
)
type OutputConfig struct {
- OutputDir string `mapstructure:"output_directory"`
+ // This is the path to the directory where the
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
}
func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error {
diff --git a/builder/vmware/common/run_config.go b/builder/vmware/common/run_config.go
index da9581427..09d4c153d 100644
--- a/builder/vmware/common/run_config.go
+++ b/builder/vmware/common/run_config.go
@@ -7,12 +7,29 @@ import (
)
type RunConfig struct {
- Headless bool `mapstructure:"headless"`
-
- VNCBindAddress string `mapstructure:"vnc_bind_address"`
- VNCPortMin int `mapstructure:"vnc_port_min"`
+ // Packer defaults to building VMware virtual machines
+ // by launching a GUI that shows the console of the machine being built. When
+ // this value is set to true, the machine will start without a console. For
+ // VMware machines, Packer will output VNC connection information in case you
+ // need to connect to the console to debug the build process.
+ Headless bool `mapstructure:"headless" required:"false"`
+ // The IP address that should be
+	// bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
+ VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
+ // The minimum and maximum port
+ // to use for VNC access to the virtual machine. The builder uses VNC to type
+ // the initial boot_command. Because Packer generally runs in parallel,
+ // Packer uses a randomly chosen port in this range that appears available. By
+ // default this is 5900 to 6000. The minimum and maximum ports are
+ // inclusive.
+ VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
VNCPortMax int `mapstructure:"vnc_port_max"`
- VNCDisablePassword bool `mapstructure:"vnc_disable_password"`
+ // Don't auto-generate a VNC password that
+ // is used to secure the VNC communication with the VM. This must be set to
+ // true if building on ESXi 6.5 and 6.7 with VNC enabled. Defaults to
+ // false.
+ VNCDisablePassword bool `mapstructure:"vnc_disable_password" required:"false"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) (errs []error) {
diff --git a/builder/vmware/common/shutdown_config.go b/builder/vmware/common/shutdown_config.go
index faa90268f..159bb5e3c 100644
--- a/builder/vmware/common/shutdown_config.go
+++ b/builder/vmware/common/shutdown_config.go
@@ -8,8 +8,15 @@ import (
)
type ShutdownConfig struct {
- ShutdownCommand string `mapstructure:"shutdown_command"`
- RawShutdownTimeout string `mapstructure:"shutdown_timeout"`
+ // The command to use to gracefully shut down the
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // The amount of time to wait after executing the
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
}
diff --git a/builder/vmware/common/tools_config.go b/builder/vmware/common/tools_config.go
index 8bf5c9bad..4a82c55f1 100644
--- a/builder/vmware/common/tools_config.go
+++ b/builder/vmware/common/tools_config.go
@@ -5,8 +5,18 @@ import (
)
type ToolsConfig struct {
- ToolsUploadFlavor string `mapstructure:"tools_upload_flavor"`
- ToolsUploadPath string `mapstructure:"tools_upload_path"`
+ // The flavor of the VMware Tools ISO to
+ // upload into the VM. Valid values are darwin, linux, and windows. By
+ // default, this is empty, which means VMware tools won't be uploaded.
+ ToolsUploadFlavor string `mapstructure:"tools_upload_flavor" required:"false"`
+ // The path in the VM to upload the
+ // VMware tools. This only takes effect if tools_upload_flavor is non-empty.
+ // This is a configuration
+ // template that has a single
+ // valid variable: Flavor, which will be the value of tools_upload_flavor.
+ // By default the upload path is set to {{.Flavor}}.iso. This setting is not
+ // used when remote_type is esx5.
+ ToolsUploadPath string `mapstructure:"tools_upload_path" required:"false"`
}
func (c *ToolsConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/vmx_config.go b/builder/vmware/common/vmx_config.go
index 34b302305..225dec923 100644
--- a/builder/vmware/common/vmx_config.go
+++ b/builder/vmware/common/vmx_config.go
@@ -5,10 +5,27 @@ import (
)
type VMXConfig struct {
- VMXData map[string]string `mapstructure:"vmx_data"`
- VMXDataPost map[string]string `mapstructure:"vmx_data_post"`
- VMXRemoveEthernet bool `mapstructure:"vmx_remove_ethernet_interfaces"`
- VMXDisplayName string `mapstructure:"display_name"`
+ // Arbitrary key/values to enter
+ // into the virtual machine VMX file. This is for advanced users who want to
+ // set properties that aren't yet supported by the builder.
+ VMXData map[string]string `mapstructure:"vmx_data" required:"false"`
+ // Identical to vmx_data,
+ // except that it is run after the virtual machine is shutdown, and before the
+ // virtual machine is exported.
+ VMXDataPost map[string]string `mapstructure:"vmx_data_post" required:"false"`
+ // Remove all ethernet interfaces
+ // from the VMX file after building. This is for advanced users who understand
+ // the ramifications, but is useful for building Vagrant boxes since Vagrant
+ // will create ethernet interfaces when provisioning a box. Defaults to
+ // false.
+ VMXRemoveEthernet bool `mapstructure:"vmx_remove_ethernet_interfaces" required:"false"`
+ // The name that will appear in your vSphere client,
+ // and will be used for the vmx basename. This will override the "displayname"
+ // value in your vmx file. It will also override the "displayname" if you have
+ // set it in the "vmx_data" Packer option. This option is useful if you are
+ // chaining vmx builds and want to make sure that the display name of each step
+ // in the chain is unique.
+ VMXDisplayName string `mapstructure:"display_name" required:"false"`
}
func (c *VMXConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/iso/config.go b/builder/vmware/iso/config.go
index 54ed11c28..1363ac789 100644
--- a/builder/vmware/iso/config.go
+++ b/builder/vmware/iso/config.go
@@ -29,25 +29,70 @@ type Config struct {
vmwcommon.ToolsConfig `mapstructure:",squash"`
vmwcommon.VMXConfig `mapstructure:",squash"`
vmwcommon.ExportConfig `mapstructure:",squash"`
-
- // disk drives
- AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
- DiskAdapterType string `mapstructure:"disk_adapter_type"`
- DiskName string `mapstructure:"vmdk_name"`
- DiskSize uint `mapstructure:"disk_size"`
- DiskTypeId string `mapstructure:"disk_type_id"`
- Format string `mapstructure:"format"`
-
- // cdrom drive
- CdromAdapterType string `mapstructure:"cdrom_adapter_type"`
-
- // platform information
- GuestOSType string `mapstructure:"guest_os_type"`
- Version string `mapstructure:"version"`
- VMName string `mapstructure:"vm_name"`
+ // The size(s) of any additional
+ // hard disks for the VM in megabytes. If this is not specified then the VM
+ // will only contain a primary hard disk. The builder uses expandable, not
+ // fixed-size virtual hard disks, so the actual file representing the disk will
+ // not use the full size unless it is full.
+ AdditionalDiskSize []uint `mapstructure:"disk_additional_size" required:"false"`
+ // The adapter type of the VMware virtual disk
+ // to create. This option is for advanced usage, modify only if you know what
+ // you're doing. Some of the options you can specify are ide, sata, nvme
+ // or scsi (which uses the "lsilogic" scsi interface by default). If you
+ // specify another option, Packer will assume that you're specifying a scsi
+ // interface of that specified type. For more information, please consult the
+ //
+ // Virtual Disk Manager User's Guide for desktop VMware clients.
+ // For ESXi, refer to the proper ESXi documentation.
+ DiskAdapterType string `mapstructure:"disk_adapter_type" required:"false"`
+ // The filename of the virtual disk that'll be created,
+ // without the extension. This defaults to packer.
+ DiskName string `mapstructure:"vmdk_name" required:"false"`
+ // The size of the hard disk for the VM in megabytes.
+ // The builder uses expandable, not fixed-size virtual hard disks, so the
+ // actual file representing the disk will not use the full size unless it
+ // is full. By default this is set to 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The type of VMware virtual disk to create. This
+ // option is for advanced usage.
+ DiskTypeId string `mapstructure:"disk_type_id" required:"false"`
+ // Either "ovf", "ova" or "vmx", this specifies the output
+ // format of the exported virtual machine. This defaults to "ovf".
+ // Before using this option, you need to install ovftool. This option
+ // currently only works when option remote_type is set to "esx5".
+ // Since ovftool is only capable of password based authentication
+ // remote_password must be set when exporting the VM.
+ Format string `mapstructure:"format" required:"false"`
+ // The adapter type (or bus) that will be used
+ // by the cdrom device. This is chosen by default based on the disk adapter
+ // type. VMware tends to lean towards ide for the cdrom device unless
+ // sata is chosen for the disk adapter and so Packer attempts to mirror
+ // this logic. This field can be specified as either ide, sata, or scsi.
+ CdromAdapterType string `mapstructure:"cdrom_adapter_type" required:"false"`
+ // The guest OS type being installed. This will be
+ // set in the VMware VMX. By default this is other. By specifying a more
+ // specific OS type, VMware may perform some optimizations or virtual hardware
+ // changes to better support the operating system running in the
+ // virtual machine.
+ GuestOSType string `mapstructure:"guest_os_type" required:"false"`
+ // The vmx hardware
+ // version
+ // for the new virtual machine. Only the default value has been tested, any
+ // other value is experimental. Default value is 9.
+ Version string `mapstructure:"version" required:"false"`
+ // This is the name of the VMX file for the new virtual
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"`
- VMXTemplatePath string `mapstructure:"vmx_template_path"`
+ // Path to a configuration
+ // template that defines the
+ // contents of the virtual machine VMX file for VMware. This is for advanced
+ // users only as this can render the virtual machine non-functional. See
+ // below for more information. For basic VMX modifications, try
+ // vmx_data first.
+ VMXTemplatePath string `mapstructure:"vmx_template_path" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go
index 2650c865d..60bf461f9 100644
--- a/builder/vmware/vmx/config.go
+++ b/builder/vmware/vmx/config.go
@@ -26,11 +26,22 @@ type Config struct {
vmwcommon.ToolsConfig `mapstructure:",squash"`
vmwcommon.VMXConfig `mapstructure:",squash"`
vmwcommon.ExportConfig `mapstructure:",squash"`
-
- Linked bool `mapstructure:"linked"`
- RemoteType string `mapstructure:"remote_type"`
- SourcePath string `mapstructure:"source_path"`
- VMName string `mapstructure:"vm_name"`
+ // By default Packer creates a 'full' clone of
+ // the virtual machine specified in source_path. The resultant virtual
+ // machine is fully independent from the parent it was cloned from.
+ Linked bool `mapstructure:"linked" required:"false"`
+ // The type of remote machine that will be used to
+ // build this VM rather than a local desktop product. The only value accepted
+ // for this currently is esx5. If this is not set, a desktop product will
+ // be used. By default, this is not set.
+ RemoteType string `mapstructure:"remote_type" required:"false"`
+ // Path to the source VMX file to clone. If
+ // remote_type is enabled then this specifies a path on the remote_host.
+ SourcePath string `mapstructure:"source_path" required:"true"`
+ // This is the name of the VMX file for the new virtual
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/yandex/config.go b/builder/yandex/config.go
index c725aba4c..aeea94ad5 100644
--- a/builder/yandex/config.go
+++ b/builder/yandex/config.go
@@ -25,38 +25,83 @@ var reImageFamily = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$`)
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Communicator communicator.Config `mapstructure:",squash"`
-
- Endpoint string `mapstructure:"endpoint"`
- FolderID string `mapstructure:"folder_id"`
- ServiceAccountKeyFile string `mapstructure:"service_account_key_file"`
- Token string `mapstructure:"token"`
-
- DiskName string `mapstructure:"disk_name"`
- DiskSizeGb int `mapstructure:"disk_size_gb"`
- DiskType string `mapstructure:"disk_type"`
- ImageDescription string `mapstructure:"image_description"`
- ImageFamily string `mapstructure:"image_family"`
- ImageLabels map[string]string `mapstructure:"image_labels"`
- ImageName string `mapstructure:"image_name"`
- ImageProductIDs []string `mapstructure:"image_product_ids"`
- InstanceCores int `mapstructure:"instance_cores"`
- InstanceMemory int `mapstructure:"instance_mem_gb"`
- InstanceName string `mapstructure:"instance_name"`
- Labels map[string]string `mapstructure:"labels"`
- PlatformID string `mapstructure:"platform_id"`
- Metadata map[string]string `mapstructure:"metadata"`
- SerialLogFile string `mapstructure:"serial_log_file"`
- SourceImageFamily string `mapstructure:"source_image_family"`
- SourceImageFolderID string `mapstructure:"source_image_folder_id"`
- SourceImageID string `mapstructure:"source_image_id"`
- SubnetID string `mapstructure:"subnet_id"`
- UseIPv4Nat bool `mapstructure:"use_ipv4_nat"`
- UseIPv6 bool `mapstructure:"use_ipv6"`
- UseInternalIP bool `mapstructure:"use_internal_ip"`
- Zone string `mapstructure:"zone"`
+ // Non-standard API endpoint URL.
+ Endpoint string `mapstructure:"endpoint" required:"false"`
+ // The folder ID that will be used to launch instances and store images.
+ // Alternatively you may set value by environment variable YC_FOLDER_ID.
+ FolderID string `mapstructure:"folder_id" required:"true"`
+ // Path to file with Service Account key in json format. This
+ // is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
+ // YC_SERVICE_ACCOUNT_KEY_FILE.
+ ServiceAccountKeyFile string `mapstructure:"service_account_key_file" required:"false"`
+ // OAuth token to use to authenticate to Yandex.Cloud. Alternatively you may set
+ // value by environment variable YC_TOKEN.
+ Token string `mapstructure:"token" required:"true"`
+ // The name of the disk, if unset the instance name
+ // will be used.
+ DiskName string `mapstructure:"disk_name" required:"false"`
+ // The size of the disk in GB. This defaults to 10, which is 10GB.
+ DiskSizeGb int `mapstructure:"disk_size_gb" required:"false"`
+ // Specify disk type for the launched instance. Defaults to network-hdd.
+ DiskType string `mapstructure:"disk_type" required:"false"`
+ // The description of the resulting image.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
+ // The family name of the resulting image.
+ ImageFamily string `mapstructure:"image_family" required:"false"`
+ // Key/value pair labels to
+ // apply to the created image.
+ ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
+ // The unique name of the resulting image. Defaults to
+ // packer-{{timestamp}}.
+ ImageName string `mapstructure:"image_name" required:"false"`
+ // License IDs that indicate which licenses are attached to resulting image.
+ ImageProductIDs []string `mapstructure:"image_product_ids" required:"false"`
+ // The number of cores available to the instance.
+ InstanceCores int `mapstructure:"instance_cores" required:"false"`
+ // The amount of memory available to the instance, specified in gigabytes.
+ InstanceMemory int `mapstructure:"instance_mem_gb" required:"false"`
+ // The name assigned to the instance.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
+ // Key/value pair labels to apply to
+ // the launched instance.
+ Labels map[string]string `mapstructure:"labels" required:"false"`
+ // Identifier of the hardware platform configuration for the instance. This defaults to standard-v1.
+ PlatformID string `mapstructure:"platform_id" required:"false"`
+ // Metadata applied to the launched
+ // instance.
+ Metadata map[string]string `mapstructure:"metadata" required:"false"`
+ // File path to save serial port output of the launched instance.
+ SerialLogFile string `mapstructure:"serial_log_file" required:"false"`
+ // The source image family to create the new image
+ // from. You can also specify source_image_id instead. Just one of a source_image_id or
+ // source_image_family must be specified. Example: ubuntu-1804-lts
+ SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
+ // The ID of the folder containing the source image.
+ SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
+ // The source image ID to use to create the new image
+ // from.
+ SourceImageID string `mapstructure:"source_image_id" required:"false"`
+ // The Yandex VPC subnet id to use for
+ // the launched instance. Note, the zone of the subnet must match the
+ // zone in which the VM is launched.
+ SubnetID string `mapstructure:"subnet_id" required:"false"`
+ // If set to true, then launched instance will have external internet
+ // access.
+ UseIPv4Nat bool `mapstructure:"use_ipv4_nat" required:"false"`
+ // Set to true to enable IPv6 for the instance being
+ // created. This defaults to false, or not enabled.
+ // ~> Note: Usage of IPv6 will be available in the future.
+ UseIPv6 bool `mapstructure:"use_ipv6" required:"false"`
+ // If true, use the instance's internal IP address
+ // instead of its external IP during building.
+ UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
+ // The name of the zone to launch the instance. This defaults to ru-central1-a.
+ Zone string `mapstructure:"zone" required:"false"`
ctx interpolate.Context
- StateTimeout time.Duration `mapstructure:"state_timeout"`
+ // The time to wait for instance state changes.
+ // Defaults to 5m.
+ StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
}
func NewConfig(raws ...interface{}) (*Config, []string, error) {
diff --git a/cmd/doc-required-scraper/main.go b/cmd/doc-required-scraper/main.go
index caef5bcd0..936738214 100644
--- a/cmd/doc-required-scraper/main.go
+++ b/cmd/doc-required-scraper/main.go
@@ -2,6 +2,10 @@ package main
import (
"fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
"strings"
"github.com/gocolly/colly"
@@ -12,6 +16,12 @@ const (
CacheDir = "cache/"
)
+func commentize(text string) string {
+ text = text[strings.Index(text, ") -")+len(") -"):]
+ text = strings.ReplaceAll(text, "\n", "\n // ")
+ return text
+}
+
func main() {
c := colly.NewCollector()
@@ -23,18 +33,55 @@ func main() {
}
e.Request.Visit(url)
})
+ for _, required := range []bool{true, false} {
+ required := required
+ sel := "#required- + ul a[name]"
+ if !required {
+ sel = "#optional- + ul a[name]"
+ }
+ c.OnHTML(sel, func(e *colly.HTMLElement) {
- c.OnHTML("#required- + ul a[name]", func(e *colly.HTMLElement) {
+ name := e.Attr("name")
- builder := e.Request.URL.Path[strings.Index(e.Request.URL.Path, "/builders/")+len("/builders/"):]
- builder = strings.TrimSuffix(builder, ".html")
+ builder := e.Request.URL.Path[strings.Index(e.Request.URL.Path, "/builders/")+len("/builders/"):]
+ builder = strings.TrimSuffix(builder, ".html")
- text := e.DOM.Parent().Text()
- text = strings.ReplaceAll(text, "\n", "")
- text = strings.TrimSpace(text)
+ fieldDoc := e.DOM.Parent()
+ text := fieldDoc.Text()
- fmt.Printf("required: %25s builder: %20s text: %s\n", e.Attr("name"), builder, text)
- })
+ builderPath := strings.Split(builder, "-")[0]
+ if name == "vpc_filter" {
+ fmt.Printf("required: %25s builderPath: %20s text: %20s\n", name, builderPath, text)
+ }
+
+ err := filepath.Walk("./builder/"+builderPath, func(path string, info os.FileInfo, err error) error {
+ if err != nil || info.IsDir() || filepath.Ext(path) != ".go" {
+ return nil
+ }
+ body, err := ioutil.ReadFile(path)
+ if err != nil {
+ panic(err)
+ }
+ regex := regexp.MustCompile(fmt.Sprintf(`(\n\s+//.*)*\n+(\s*)([A-Z].*mapstructure:"%s")(\s+required:"%t")?(.*)`, name, required))
+
+ replaced := regex.ReplaceAll(body, []byte(fmt.Sprintf("\n$2//%s\n"+`$2$3 required:"%t"$5`, commentize(text), required)))
+
+ if string(replaced) == string(body) {
+ return nil
+ }
+
+ err = ioutil.WriteFile(path, replaced, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ })
+ }
c.CacheDir = CacheDir
From e6cbb013ba2cfe1dcc1f71015d624bd170edc4bf Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Fri, 31 May 2019 14:27:41 +0200
Subject: [PATCH 21/97] add // go:generate struct-markdown to all previously
edited files
---
builder/alicloud/ecs/access_config.go | 2 ++
builder/alicloud/ecs/image_config.go | 2 ++
builder/alicloud/ecs/run_config.go | 2 ++
builder/amazon/chroot/builder.go | 2 ++
builder/amazon/common/access_config.go | 2 ++
builder/amazon/common/ami_config.go | 2 ++
builder/amazon/common/block_device.go | 2 ++
builder/amazon/common/run_config.go | 2 ++
builder/amazon/ebssurrogate/builder.go | 2 ++
.../amazon/ebssurrogate/root_block_device.go | 2 ++
builder/amazon/ebsvolume/block_device.go | 2 ++
builder/amazon/ebsvolume/builder.go | 36 ++++++++++---------
builder/amazon/instance/builder.go | 2 ++
builder/azure/arm/clientconfig.go | 2 ++
builder/azure/arm/config.go | 2 ++
builder/cloudstack/config.go | 2 ++
builder/digitalocean/config.go | 2 ++
builder/docker/config.go | 2 ++
builder/docker/ecr_login.go | 2 ++
builder/googlecompute/config.go | 2 ++
builder/hyperone/config.go | 2 ++
builder/hyperv/common/output_config.go | 2 ++
builder/hyperv/common/shutdown_config.go | 2 ++
builder/hyperv/iso/builder.go | 2 ++
builder/hyperv/vmcx/builder.go | 2 ++
builder/lxc/config.go | 2 ++
builder/lxd/config.go | 2 ++
builder/ncloud/config.go | 2 ++
builder/openstack/access_config.go | 2 ++
builder/openstack/image_config.go | 2 ++
builder/openstack/run_config.go | 2 ++
builder/parallels/common/hw_config.go | 2 ++
builder/parallels/common/output_config.go | 2 ++
builder/parallels/common/prlctl_config.go | 2 ++
.../parallels/common/prlctl_post_config.go | 2 ++
.../parallels/common/prlctl_version_config.go | 2 ++
builder/parallels/common/shutdown_config.go | 2 ++
builder/parallels/common/tools_config.go | 2 ++
builder/parallels/iso/builder.go | 2 ++
builder/parallels/pvm/config.go | 2 ++
builder/qemu/builder.go | 2 ++
builder/scaleway/config.go | 2 ++
builder/tencentcloud/cvm/access_config.go | 2 ++
builder/tencentcloud/cvm/image_config.go | 2 ++
builder/tencentcloud/cvm/run_config.go | 2 ++
builder/triton/access_config.go | 2 ++
builder/triton/source_machine_config.go | 2 ++
builder/triton/target_image_config.go | 2 ++
builder/vagrant/builder.go | 2 ++
builder/virtualbox/common/export_config.go | 2 ++
builder/virtualbox/common/export_opts.go | 2 ++
.../common/guest_additions_config.go | 2 ++
builder/virtualbox/common/hw_config.go | 2 ++
builder/virtualbox/common/output_config.go | 2 ++
builder/virtualbox/common/run_config.go | 2 ++
builder/virtualbox/common/shutdown_config.go | 2 ++
builder/virtualbox/common/ssh_config.go | 2 ++
.../virtualbox/common/vbox_version_config.go | 2 ++
.../virtualbox/common/vboxbundle_config.go | 2 ++
.../virtualbox/common/vboxmanage_config.go | 2 ++
.../common/vboxmanage_post_config.go | 2 ++
builder/virtualbox/iso/builder.go | 2 ++
builder/virtualbox/ovf/config.go | 2 ++
builder/vmware/common/driver_config.go | 2 ++
builder/vmware/common/export_config.go | 2 ++
builder/vmware/common/hw_config.go | 2 ++
builder/vmware/common/output_config.go | 2 ++
builder/vmware/common/run_config.go | 2 ++
builder/vmware/common/shutdown_config.go | 2 ++
builder/vmware/common/tools_config.go | 2 ++
builder/vmware/common/vmx_config.go | 2 ++
builder/vmware/iso/config.go | 2 ++
builder/vmware/vmx/config.go | 2 ++
builder/yandex/config.go | 2 ++
74 files changed, 165 insertions(+), 17 deletions(-)
diff --git a/builder/alicloud/ecs/access_config.go b/builder/alicloud/ecs/access_config.go
index acc789d31..0d74409e2 100644
--- a/builder/alicloud/ecs/access_config.go
+++ b/builder/alicloud/ecs/access_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ecs
import (
diff --git a/builder/alicloud/ecs/image_config.go b/builder/alicloud/ecs/image_config.go
index 98cb0c834..1e4ff6cbc 100644
--- a/builder/alicloud/ecs/image_config.go
+++ b/builder/alicloud/ecs/image_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ecs
import (
diff --git a/builder/alicloud/ecs/run_config.go b/builder/alicloud/ecs/run_config.go
index a6d4de5a3..070e2557e 100644
--- a/builder/alicloud/ecs/run_config.go
+++ b/builder/alicloud/ecs/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ecs
import (
diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go
index 1c4a52f42..d91a40ed4 100644
--- a/builder/amazon/chroot/builder.go
+++ b/builder/amazon/chroot/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
// The chroot package is able to create an Amazon AMI without requiring
// the launch of a new instance for every build. It does this by attaching
// and mounting the root volume of another AMI and chrooting into that
diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go
index e3beac5ea..5ef1de3f4 100644
--- a/builder/amazon/common/access_config.go
+++ b/builder/amazon/common/access_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go
index d231aac73..5aefb7a43 100644
--- a/builder/amazon/common/ami_config.go
+++ b/builder/amazon/common/ami_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go
index 9592be58a..4504b3ac4 100644
--- a/builder/amazon/common/block_device.go
+++ b/builder/amazon/common/block_device.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index 325433d54..f4e6ae56e 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go
index 7047662cf..afb4f3905 100644
--- a/builder/amazon/ebssurrogate/builder.go
+++ b/builder/amazon/ebssurrogate/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
// The ebssurrogate package contains a packer.Builder implementation that
// builds a new EBS-backed AMI using an ephemeral instance.
package ebssurrogate
diff --git a/builder/amazon/ebssurrogate/root_block_device.go b/builder/amazon/ebssurrogate/root_block_device.go
index 543d15262..e615fd504 100644
--- a/builder/amazon/ebssurrogate/root_block_device.go
+++ b/builder/amazon/ebssurrogate/root_block_device.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ebssurrogate
import (
diff --git a/builder/amazon/ebsvolume/block_device.go b/builder/amazon/ebsvolume/block_device.go
index ebaffff56..0e6e5da19 100644
--- a/builder/amazon/ebsvolume/block_device.go
+++ b/builder/amazon/ebsvolume/block_device.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ebsvolume
import (
diff --git a/builder/amazon/ebsvolume/builder.go b/builder/amazon/ebsvolume/builder.go
index 66c0d05c1..3556dcc61 100644
--- a/builder/amazon/ebsvolume/builder.go
+++ b/builder/amazon/ebsvolume/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
// The ebsvolume package contains a packer.Builder implementation that
// builds EBS volumes for Amazon EC2 using an ephemeral instance,
package ebsvolume
@@ -23,25 +25,25 @@ type Config struct {
awscommon.AccessConfig `mapstructure:",squash"`
awscommon.RunConfig `mapstructure:",squash"`
// Add the block device
- // mappings to the AMI. The block device mappings allow for keys:
- VolumeMappings []BlockDevice `mapstructure:"ebs_volumes" required:"false"`
+ // mappings to the AMI. The block device mappings allow for keys:
+ VolumeMappings []BlockDevice `mapstructure:"ebs_volumes" required:"false"`
// Enable enhanced networking (ENA but not
- // SriovNetSupport) on HVM-compatible AMIs. If set, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
- // disable enhanced networking in the final AMI as opposed to passing the
- // setting through unchanged from the source. Note: you must make sure
- // enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
+ // SriovNetSupport) on HVM-compatible AMIs. If set, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
+ // disable enhanced networking in the final AMI as opposed to passing the
+ // setting through unchanged from the source. Note: you must make sure
+ // enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
// Enable enhanced networking (SriovNetSupport but
- // not ENA) on HVM-compatible AMIs. If true, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
- // sure enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- // Default false.
- AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
+ // not ENA) on HVM-compatible AMIs. If true, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
+ // sure enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ // Default false.
+ AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
launchBlockDevices awscommon.BlockDevices
ctx interpolate.Context
diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index 6f64f8733..201dfe73a 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
// The instance package contains a packer.Builder implementation that builds
// AMIs for Amazon EC2 backed by instance storage, as opposed to EBS storage.
package instance
diff --git a/builder/azure/arm/clientconfig.go b/builder/azure/arm/clientconfig.go
index 4cfac9922..ab8a22ef9 100644
--- a/builder/azure/arm/clientconfig.go
+++ b/builder/azure/arm/clientconfig.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package arm
import (
diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go
index aca143ff8..b0b740474 100644
--- a/builder/azure/arm/config.go
+++ b/builder/azure/arm/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package arm
import (
diff --git a/builder/cloudstack/config.go b/builder/cloudstack/config.go
index 94f76347c..9e4e5c105 100644
--- a/builder/cloudstack/config.go
+++ b/builder/cloudstack/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package cloudstack
import (
diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go
index ff654082c..970a16080 100644
--- a/builder/digitalocean/config.go
+++ b/builder/digitalocean/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package digitalocean
import (
diff --git a/builder/docker/config.go b/builder/docker/config.go
index c69c5c7f4..dbb041dc0 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package docker
import (
diff --git a/builder/docker/ecr_login.go b/builder/docker/ecr_login.go
index 5daed00cd..9438f15e0 100644
--- a/builder/docker/ecr_login.go
+++ b/builder/docker/ecr_login.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package docker
import (
diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go
index d5edc42aa..8748e1559 100644
--- a/builder/googlecompute/config.go
+++ b/builder/googlecompute/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package googlecompute
import (
diff --git a/builder/hyperone/config.go b/builder/hyperone/config.go
index d8adc8f02..ea843eab9 100644
--- a/builder/hyperone/config.go
+++ b/builder/hyperone/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package hyperone
import (
diff --git a/builder/hyperv/common/output_config.go b/builder/hyperv/common/output_config.go
index 94a4f0ed6..bd8f68045 100644
--- a/builder/hyperv/common/output_config.go
+++ b/builder/hyperv/common/output_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/hyperv/common/shutdown_config.go b/builder/hyperv/common/shutdown_config.go
index f6a2ef9b8..35d8b8266 100644
--- a/builder/hyperv/common/shutdown_config.go
+++ b/builder/hyperv/common/shutdown_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/hyperv/iso/builder.go b/builder/hyperv/iso/builder.go
index 9af93c6f5..29d510a9f 100644
--- a/builder/hyperv/iso/builder.go
+++ b/builder/hyperv/iso/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package iso
import (
diff --git a/builder/hyperv/vmcx/builder.go b/builder/hyperv/vmcx/builder.go
index e34f33698..b8570bf76 100644
--- a/builder/hyperv/vmcx/builder.go
+++ b/builder/hyperv/vmcx/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package vmcx
import (
diff --git a/builder/lxc/config.go b/builder/lxc/config.go
index 7f356b16e..ea9f1978c 100644
--- a/builder/lxc/config.go
+++ b/builder/lxc/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package lxc
import (
diff --git a/builder/lxd/config.go b/builder/lxd/config.go
index d8b265a1a..4f67bf0ca 100644
--- a/builder/lxd/config.go
+++ b/builder/lxd/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package lxd
import (
diff --git a/builder/ncloud/config.go b/builder/ncloud/config.go
index 73a20a5b3..f2ee13eef 100644
--- a/builder/ncloud/config.go
+++ b/builder/ncloud/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ncloud
import (
diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go
index 2b9c27d86..3bfac129a 100644
--- a/builder/openstack/access_config.go
+++ b/builder/openstack/access_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package openstack
import (
diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go
index 1f907f1c8..dc9fb1ed5 100644
--- a/builder/openstack/image_config.go
+++ b/builder/openstack/image_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package openstack
import (
diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go
index 8d1aef3a3..1f7cdd40b 100644
--- a/builder/openstack/run_config.go
+++ b/builder/openstack/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package openstack
import (
diff --git a/builder/parallels/common/hw_config.go b/builder/parallels/common/hw_config.go
index 3515f3ea6..3c0e0aeec 100644
--- a/builder/parallels/common/hw_config.go
+++ b/builder/parallels/common/hw_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/output_config.go b/builder/parallels/common/output_config.go
index 204feffa8..c773cad01 100644
--- a/builder/parallels/common/output_config.go
+++ b/builder/parallels/common/output_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/prlctl_config.go b/builder/parallels/common/prlctl_config.go
index 76aebf3e0..753de4c3e 100644
--- a/builder/parallels/common/prlctl_config.go
+++ b/builder/parallels/common/prlctl_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/prlctl_post_config.go b/builder/parallels/common/prlctl_post_config.go
index 526d12d29..d1763800d 100644
--- a/builder/parallels/common/prlctl_post_config.go
+++ b/builder/parallels/common/prlctl_post_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/prlctl_version_config.go b/builder/parallels/common/prlctl_version_config.go
index b31ade19a..6a7f97ef3 100644
--- a/builder/parallels/common/prlctl_version_config.go
+++ b/builder/parallels/common/prlctl_version_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/shutdown_config.go b/builder/parallels/common/shutdown_config.go
index f9a8626ef..658f90018 100644
--- a/builder/parallels/common/shutdown_config.go
+++ b/builder/parallels/common/shutdown_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/common/tools_config.go b/builder/parallels/common/tools_config.go
index 6a1d8c790..0b746b939 100644
--- a/builder/parallels/common/tools_config.go
+++ b/builder/parallels/common/tools_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go
index 1ccdd934b..a2c61f1ba 100644
--- a/builder/parallels/iso/builder.go
+++ b/builder/parallels/iso/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package iso
import (
diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go
index 99b54532b..8c11b86d7 100644
--- a/builder/parallels/pvm/config.go
+++ b/builder/parallels/pvm/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package pvm
import (
diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index 55df4c600..97fe3f94e 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package qemu
import (
diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go
index 6b8cabdba..f3909f7cb 100644
--- a/builder/scaleway/config.go
+++ b/builder/scaleway/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package scaleway
import (
diff --git a/builder/tencentcloud/cvm/access_config.go b/builder/tencentcloud/cvm/access_config.go
index 6c7c16584..17ff5f74b 100644
--- a/builder/tencentcloud/cvm/access_config.go
+++ b/builder/tencentcloud/cvm/access_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package cvm
import (
diff --git a/builder/tencentcloud/cvm/image_config.go b/builder/tencentcloud/cvm/image_config.go
index b6ece44de..57eb18490 100644
--- a/builder/tencentcloud/cvm/image_config.go
+++ b/builder/tencentcloud/cvm/image_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package cvm
import (
diff --git a/builder/tencentcloud/cvm/run_config.go b/builder/tencentcloud/cvm/run_config.go
index ee304e0cb..21fae66fe 100644
--- a/builder/tencentcloud/cvm/run_config.go
+++ b/builder/tencentcloud/cvm/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package cvm
import (
diff --git a/builder/triton/access_config.go b/builder/triton/access_config.go
index 27196b81d..374fcbed5 100644
--- a/builder/triton/access_config.go
+++ b/builder/triton/access_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package triton
import (
diff --git a/builder/triton/source_machine_config.go b/builder/triton/source_machine_config.go
index 5549d0549..93c09e4f5 100644
--- a/builder/triton/source_machine_config.go
+++ b/builder/triton/source_machine_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package triton
import (
diff --git a/builder/triton/target_image_config.go b/builder/triton/target_image_config.go
index 183ae1932..6ee195428 100644
--- a/builder/triton/target_image_config.go
+++ b/builder/triton/target_image_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package triton
import (
diff --git a/builder/vagrant/builder.go b/builder/vagrant/builder.go
index ccd571777..be07796f1 100644
--- a/builder/vagrant/builder.go
+++ b/builder/vagrant/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package vagrant
import (
diff --git a/builder/virtualbox/common/export_config.go b/builder/virtualbox/common/export_config.go
index a5af812b6..af38c0d78 100644
--- a/builder/virtualbox/common/export_config.go
+++ b/builder/virtualbox/common/export_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/export_opts.go b/builder/virtualbox/common/export_opts.go
index ad40b1d3c..3a802a314 100644
--- a/builder/virtualbox/common/export_opts.go
+++ b/builder/virtualbox/common/export_opts.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/guest_additions_config.go b/builder/virtualbox/common/guest_additions_config.go
index eb2b5e278..082d10b21 100644
--- a/builder/virtualbox/common/guest_additions_config.go
+++ b/builder/virtualbox/common/guest_additions_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/hw_config.go b/builder/virtualbox/common/hw_config.go
index ec63b1a46..2f7d6ebfe 100644
--- a/builder/virtualbox/common/hw_config.go
+++ b/builder/virtualbox/common/hw_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/output_config.go b/builder/virtualbox/common/output_config.go
index 263d0b8e6..6fa30ebbc 100644
--- a/builder/virtualbox/common/output_config.go
+++ b/builder/virtualbox/common/output_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/run_config.go b/builder/virtualbox/common/run_config.go
index b460ca45d..331d3d195 100644
--- a/builder/virtualbox/common/run_config.go
+++ b/builder/virtualbox/common/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/shutdown_config.go b/builder/virtualbox/common/shutdown_config.go
index e5d7acf9d..85252ed7a 100644
--- a/builder/virtualbox/common/shutdown_config.go
+++ b/builder/virtualbox/common/shutdown_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go
index 57bce7353..c78008be8 100644
--- a/builder/virtualbox/common/ssh_config.go
+++ b/builder/virtualbox/common/ssh_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/vbox_version_config.go b/builder/virtualbox/common/vbox_version_config.go
index 4aa200e6d..4d911f48e 100644
--- a/builder/virtualbox/common/vbox_version_config.go
+++ b/builder/virtualbox/common/vbox_version_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/vboxbundle_config.go b/builder/virtualbox/common/vboxbundle_config.go
index 7e863d529..586609987 100644
--- a/builder/virtualbox/common/vboxbundle_config.go
+++ b/builder/virtualbox/common/vboxbundle_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/vboxmanage_config.go b/builder/virtualbox/common/vboxmanage_config.go
index 48c1dd6ca..7027ead1f 100644
--- a/builder/virtualbox/common/vboxmanage_config.go
+++ b/builder/virtualbox/common/vboxmanage_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/common/vboxmanage_post_config.go b/builder/virtualbox/common/vboxmanage_post_config.go
index df923fe38..890a3075d 100644
--- a/builder/virtualbox/common/vboxmanage_post_config.go
+++ b/builder/virtualbox/common/vboxmanage_post_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go
index 87ba4322a..e16873b24 100644
--- a/builder/virtualbox/iso/builder.go
+++ b/builder/virtualbox/iso/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package iso
import (
diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go
index 50c3819d4..c8b2bf0d2 100644
--- a/builder/virtualbox/ovf/config.go
+++ b/builder/virtualbox/ovf/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package ovf
import (
diff --git a/builder/vmware/common/driver_config.go b/builder/vmware/common/driver_config.go
index f363aed33..df8258c69 100644
--- a/builder/vmware/common/driver_config.go
+++ b/builder/vmware/common/driver_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/export_config.go b/builder/vmware/common/export_config.go
index a09c772ec..6c90cea2d 100644
--- a/builder/vmware/common/export_config.go
+++ b/builder/vmware/common/export_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/hw_config.go b/builder/vmware/common/hw_config.go
index ad72d5465..45e7189d3 100644
--- a/builder/vmware/common/hw_config.go
+++ b/builder/vmware/common/hw_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/output_config.go b/builder/vmware/common/output_config.go
index 263d0b8e6..6fa30ebbc 100644
--- a/builder/vmware/common/output_config.go
+++ b/builder/vmware/common/output_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/run_config.go b/builder/vmware/common/run_config.go
index 09d4c153d..214219525 100644
--- a/builder/vmware/common/run_config.go
+++ b/builder/vmware/common/run_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/shutdown_config.go b/builder/vmware/common/shutdown_config.go
index 159bb5e3c..96f7f7d8d 100644
--- a/builder/vmware/common/shutdown_config.go
+++ b/builder/vmware/common/shutdown_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/tools_config.go b/builder/vmware/common/tools_config.go
index 4a82c55f1..b4dd01c91 100644
--- a/builder/vmware/common/tools_config.go
+++ b/builder/vmware/common/tools_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/common/vmx_config.go b/builder/vmware/common/vmx_config.go
index 225dec923..83d98dfc2 100644
--- a/builder/vmware/common/vmx_config.go
+++ b/builder/vmware/common/vmx_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/builder/vmware/iso/config.go b/builder/vmware/iso/config.go
index 1363ac789..c9434d31a 100644
--- a/builder/vmware/iso/config.go
+++ b/builder/vmware/iso/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package iso
import (
diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go
index 60bf461f9..bb579e43d 100644
--- a/builder/vmware/vmx/config.go
+++ b/builder/vmware/vmx/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package vmx
import (
diff --git a/builder/yandex/config.go b/builder/yandex/config.go
index aeea94ad5..bab294f62 100644
--- a/builder/yandex/config.go
+++ b/builder/yandex/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package yandex
import (
From e09f3fbd02b5e748741af15975e42b5be29d0cae Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Mon, 3 Jun 2019 17:55:09 +0200
Subject: [PATCH 22/97] amazon: update docs & links
---
builder/amazon/chroot/builder.go | 192 +++++++++++-------
builder/amazon/common/access_config.go | 128 +++++++-----
builder/amazon/common/ami_config.go | 169 ++++++++--------
builder/amazon/common/block_device.go | 102 +++++-----
builder/amazon/common/run_config.go | 263 ++++++++++++++-----------
5 files changed, 472 insertions(+), 382 deletions(-)
diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go
index d91a40ed4..0de726de0 100644
--- a/builder/amazon/chroot/builder.go
+++ b/builder/amazon/chroot/builder.go
@@ -31,99 +31,139 @@ type Config struct {
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.AccessConfig `mapstructure:",squash"`
// This is a list of devices to
- // mount into the chroot environment. This configuration parameter requires
- // some additional documentation which is in the Chroot
- // Mounts section. Please read that section for more
- // information on how to use this.
- ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
+ // mount into the chroot environment. This configuration parameter requires
+ // some additional documentation which is in the Chroot
+ // Mounts section. Please read that section for more
+ // information on how to use this.
+ ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
// How to run shell commands. This defaults to
- // {{.Command}}. This may be useful to set if you want to set environmental
- // variables or perhaps run it with sudo or so on. This is a configuration
- // template where the .Command variable is replaced with the command to be
- // run. Defaults to {{.Command}}.
- CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // {{.Command}}. This may be useful to set if you want to set environmental
+ // variables or perhaps run it with sudo or so on. This is a configuration
+ // template where the .Command variable is replaced with the command to be
+ // run. Defaults to {{.Command}}.
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
// Paths to files on the running EC2
- // instance that will be copied into the chroot environment prior to
- // provisioning. Defaults to /etc/resolv.conf so that DNS lookups work. Pass
- // an empty list to skip copying /etc/resolv.conf. You may need to do this
- // if you're building an image that uses systemd.
- CopyFiles []string `mapstructure:"copy_files" required:"false"`
+ // instance that will be copied into the chroot environment prior to
+ // provisioning. Defaults to /etc/resolv.conf so that DNS lookups work. Pass
+ // an empty list to skip copying /etc/resolv.conf. You may need to do this
+ // if you're building an image that uses systemd.
+ CopyFiles []string `mapstructure:"copy_files" required:"false"`
// The path to the device where the root volume of
- // the source AMI will be attached. This defaults to "" (empty string), which
- // forces Packer to find an open device automatically.
- DevicePath string `mapstructure:"device_path" required:"false"`
+ // the source AMI will be attached. This defaults to "" (empty string), which
+ // forces Packer to find an open device automatically.
+ DevicePath string `mapstructure:"device_path" required:"false"`
// When we call the mount command (by default
- // mount -o device dir), the string provided in nvme_mount_path will
- // replace device in that command. When this option is not set, device in
- // that command will be something like /dev/sdf1, mirroring the attached
- // device name. This assumption works for most instances but will fail with c5
- // and m5 instances. In order to use the chroot builder with c5 and m5
- // instances, you must manually set nvme_device_path and device_path.
- NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
+ // mount -o device dir), the string provided in nvme_mount_path will
+ // replace device in that command. When this option is not set, device in
+ // that command will be something like /dev/sdf1, mirroring the attached
+ // device name. This assumption works for most instances but will fail with c5
+ // and m5 instances. In order to use the chroot builder with c5 and m5
+ // instances, you must manually set nvme_device_path and device_path.
+ NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
// Build a new volume instead of starting from an
- // existing AMI root volume snapshot. Default false. If true, source_ami
- // is no longer used and the following options become required:
- // ami_virtualization_type, pre_mount_commands and root_volume_size. The
- // below options are also required in this mode only:
- FromScratch bool `mapstructure:"from_scratch" required:"false"`
+ // existing AMI root volume snapshot. Default false. If true, source_ami
+ // is no longer used and the following options become required:
+ // ami_virtualization_type, pre_mount_commands and root_volume_size. The
+ // below options are also required in this mode only:
+ FromScratch bool `mapstructure:"from_scratch" required:"false"`
// Options to supply the mount command
- // when mounting devices. Each option will be prefixed with -o and supplied
- // to the mount command ran by Packer. Because this command is ran in a
- // shell, user discretion is advised. See this manual page for the mount
- // command for valid file
- // system specific options.
- MountOptions []string `mapstructure:"mount_options" required:"false"`
+ // when mounting devices. Each option will be prefixed with -o and supplied
+	// to the mount command run by Packer. Because this command is run in a
+ // shell, user discretion is advised. See this manual page for the mount
+ // command for valid file
+ // system specific options.
+ MountOptions []string `mapstructure:"mount_options" required:"false"`
// The partition number containing the /
- // partition. By default this is the first partition of the volume, (for
- // example, xvda1) but you can designate the entire block device by setting
- // "mount_partition": "0" in your config, which will mount xvda instead.
- MountPartition string `mapstructure:"mount_partition" required:"false"`
+ // partition. By default this is the first partition of the volume, (for
+ // example, xvda1) but you can designate the entire block device by setting
+ // "mount_partition": "0" in your config, which will mount xvda instead.
+ MountPartition string `mapstructure:"mount_partition" required:"false"`
// The path where the volume will be mounted. This is
- // where the chroot environment will be. This defaults to
- // /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration
- // template where the .Device variable is replaced with the name of the
- // device where the volume is attached.
- MountPath string `mapstructure:"mount_path" required:"false"`
+ // where the chroot environment will be. This defaults to
+ // /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration
+ // template where the .Device variable is replaced with the name of the
+ // device where the volume is attached.
+ MountPath string `mapstructure:"mount_path" required:"false"`
// As pre_mount_commands, but the
- // commands are executed after mounting the root device and before the extra
- // mount and copy steps. The device and mount path are provided by
- // {{.Device}} and {{.MountPath}}.
- PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
+ // commands are executed after mounting the root device and before the extra
+ // mount and copy steps. The device and mount path are provided by
+ // {{.Device}} and {{.MountPath}}.
+ PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
// A series of commands to execute
- // after attaching the root volume and before mounting the chroot. This is not
- // required unless using from_scratch. If so, this should include any
- // partitioning and filesystem creation commands. The path to the device is
- // provided by {{.Device}}.
- PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
+ // after attaching the root volume and before mounting the chroot. This is not
+ // required unless using from_scratch. If so, this should include any
+ // partitioning and filesystem creation commands. The path to the device is
+ // provided by {{.Device}}.
+ PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
// The root device name. For example, xvda.
- RootDeviceName string `mapstructure:"root_device_name" required:"false"`
+ RootDeviceName string `mapstructure:"root_device_name" required:"false"`
// The size of the root volume in GB for the
- // chroot environment and the resulting AMI. Default size is the snapshot size
- // of the source_ami unless from_scratch is true, in which case this
- // field must be defined.
- RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
+ // chroot environment and the resulting AMI. Default size is the snapshot size
+ // of the source_ami unless from_scratch is true, in which case this
+ // field must be defined.
+ RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
// The type of EBS volume for the chroot
- // environment and resulting AMI. The default value is the type of the
- // source_ami, unless from_scratch is true, in which case the default
- // value is gp2. You can only specify io1 if building based on top of a
- // source_ami which is also io1.
- RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
+ // environment and resulting AMI. The default value is the type of the
+ // source_ami, unless from_scratch is true, in which case the default
+ // value is gp2. You can only specify io1 if building based on top of a
+ // source_ami which is also io1.
+ RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
// The source AMI whose root volume will be copied and
- // provisioned on the currently running instance. This must be an EBS-backed
- // AMI with a root volume snapshot that you have access to. Note: this is not
- // used when from_scratch is set to true.
- SourceAmi string `mapstructure:"source_ami" required:"true"`
+ // provisioned on the currently running instance. This must be an EBS-backed
+ // AMI with a root volume snapshot that you have access to. Note: this is not
+ // used when from_scratch is set to true.
+ SourceAmi string `mapstructure:"source_ami" required:"true"`
// Filters used to populate the source_ami
- // field. Example:
- SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
+ // field. Example:
+ //
+ //
+ // ``` json
+ // {
+ // "source_ami_filter": {
+ // "filters": {
+ // "virtualization-type": "hvm",
+ // "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ // "root-device-type": "ebs"
+ // },
+ // "owners": ["099720109477"],
+ // "most_recent": true
+ // }
+ // }
+ // ```
+ //
+ // This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ // This will fail unless *exactly* one AMI is returned. In the above example,
+ // `most_recent` will cause this to succeed by selecting the newest image.
+ //
+ // - `filters` (map of strings) - filters used to select a `source_ami`.
+ // NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ // described in the docs for
+ // [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ // is valid.
+ //
+ // - `owners` (array of strings) - Filters the images by their owner. You
+ // may specify one or more AWS account IDs, "self" (which will use the
+ // account whose credentials you are using to run Packer), or an AWS owner
+ // alias: for example, "amazon", "aws-marketplace", or "microsoft". This
+ // option is required for security reasons.
+ //
+ // - `most_recent` (boolean) - Selects the newest created image when true.
+ // This is most useful for selecting a daily distro build.
+ //
+ // You may set this in place of `source_ami` or in conjunction with it. If you
+ // set this in conjunction with `source_ami`, the `source_ami` will be added
+ // to the filter. The provided `source_ami` must meet all of the filtering
+ // criteria provided in `source_ami_filter`; this pins the AMI returned by the
+ // filter, but will cause Packer to fail if the `source_ami` does not exist.
+ SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
// Tags to apply to the
- // volumes that are launched. This is a template
- // engine, see Build template
- // data for more information.
- RootVolumeTags awscommon.TagMap `mapstructure:"root_volume_tags" required:"false"`
+ // volumes that are *launched*. This is a [template
+ // engine](/docs/templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
+ RootVolumeTags awscommon.TagMap `mapstructure:"root_volume_tags" required:"false"`
// what architecture to use when registering the
- // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
- Architecture string `mapstructure:"ami_architecture" required:"false"`
+ // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+ Architecture string `mapstructure:"ami_architecture" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go
index 5ef1de3f4..3dd5b2fa4 100644
--- a/builder/amazon/common/access_config.go
+++ b/builder/amazon/common/access_config.go
@@ -21,17 +21,17 @@ import (
)
type VaultAWSEngineOptions struct {
- Name string `mapstructure:"name"`
- RoleARN string `mapstructure:"role_arn"`
+ Name string `mapstructure:"name"`
+ RoleARN string `mapstructure:"role_arn"`
// Specifies the TTL for the use of the STS token. This
- // is specified as a string with a duration suffix. Valid only when
- // credential_type is assumed_role or federation_token. When not
- // specified, the default_sts_ttl set for the role will be used. If that
- // is also not set, then the default value of 3600s will be used. AWS
- // places limits on the maximum TTL allowed. See the AWS documentation on
- // the DurationSeconds parameter for AssumeRole (for assumed_role
- // credential types) and GetFederationToken (for federation_token
- // credential types) for more details.
+ // is specified as a string with a duration suffix. Valid only when
+ // credential_type is assumed_role or federation_token. When not
+ // specified, the default_sts_ttl set for the role will be used. If that
+ // is also not set, then the default value of 3600s will be used. AWS
+ // places limits on the maximum TTL allowed. See the AWS documentation on
+ // the DurationSeconds parameter for AssumeRole (for assumed_role
+ // credential types) and GetFederationToken (for federation_token
+ // credential types) for more details.
TTL string `mapstructure:"ttl" required:"false"`
EngineName string `mapstructure:"engine_name"`
}
@@ -43,55 +43,77 @@ func (v *VaultAWSEngineOptions) Empty() bool {
// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
- // The access key used to communicate with AWS. Learn
- // how to set this
- AccessKey string `mapstructure:"access_key" required:"true"`
+	// The access key used to communicate with AWS. [Learn how to set
+	// this](/docs/builders/amazon.html#specifying-amazon-credentials). On EBS, this
+ // is not required if you are using `use_vault_aws_engine` for
+ // authentication instead.
+ AccessKey string `mapstructure:"access_key" required:"true"`
// This option is useful if you use a cloud
- // provider whose API is compatible with aws EC2. Specify another endpoint
- // like this https://ec2.custom.endpoint.com.
- CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
- // Enable automatic decoding of
- // any encoded authorization (error) messages using the
- // sts:DecodeAuthorizationMessage API. Note: requires that the effective
- // user/role have permissions to sts:DecodeAuthorizationMessage on resource
- // *. Default false.
- DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
+ // provider whose API is compatible with aws EC2. Specify another endpoint
+ // like this https://ec2.custom.endpoint.com.
+ CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
+ // Enable automatic decoding of any encoded authorization (error) messages
+ // using the `sts:DecodeAuthorizationMessage` API. Note: requires that the
+ // effective user/role have permissions to `sts:DecodeAuthorizationMessage`
+ // on resource `*`. Default `false`.
+ DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
// This allows skipping TLS
- // verification of the AWS EC2 endpoint. The default is false.
- InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
- // The MFA
- // TOTP
- // code. This should probably be a user variable since it changes all the
- // time.
- MFACode string `mapstructure:"mfa_code" required:"false"`
+ // verification of the AWS EC2 endpoint. The default is false.
+ InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
+ // The MFA TOTP code. This should probably be a user variable since it
+ // changes all the time.
+ MFACode string `mapstructure:"mfa_code" required:"false"`
// The profile to use in the shared credentials file for
- // AWS. See Amazon's documentation on specifying
- // profiles
- // for more details.
- ProfileName string `mapstructure:"profile" required:"false"`
- // The name of the region, such as us-east-1, in which
- // to launch the EC2 instance to create the AMI.
- RawRegion string `mapstructure:"region" required:"true"`
- // The secret key used to communicate with AWS. Learn
- // how to set this
- SecretKey string `mapstructure:"secret_key" required:"true"`
+ // AWS. See Amazon's documentation on [specifying
+ // profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
+ // for more details.
+ ProfileName string `mapstructure:"profile" required:"false"`
+ // The name of the region, such as `us-east-1`, in which
+ // to launch the EC2 instance to create the AMI.
+ // When chroot building, this value is guessed from environment.
+ RawRegion string `mapstructure:"region" required:"true"`
+ // The secret key used to communicate with AWS. [Learn how to set
+ // this](amazon.html#specifying-amazon-credentials). This is not required
+ // if you are using `use_vault_aws_engine` for authentication instead.
+ SecretKey string `mapstructure:"secret_key" required:"true"`
// Set to true if you want to skip
- // validation of the ami_regions configuration option. Default false.
- SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
- SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
+ // validation of the ami_regions configuration option. Default false.
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
// The access token to use. This is different from the
- // access key and secret key. If you're not sure what this is, then you
- // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
- // environmental variable.
- Token string `mapstructure:"token" required:"false"`
- session *session.Session
+ // access key and secret key. If you're not sure what this is, then you
+ // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ // environmental variable.
+ Token string `mapstructure:"token" required:"false"`
+ session *session.Session
// Get credentials from Hashicorp Vault's aws
- // secrets engine. You must already have created a role to use. For more
- // information about generating credentials via the Vault engine, see the
- // Vault
- // docs.
- // If you set this flag, you must also set the below options:
- VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
+ // secrets engine. You must already have created a role to use. For more
+ // information about generating credentials via the Vault engine, see the
+ // Vault
+ // docs.
+ // If you set this flag, you must also set the below options:
+ // - `name` (string) - Required. Specifies the name of the role to generate
+ // credentials against. This is part of the request URL.
+ // - `engine_name` (string) - The name of the aws secrets engine. In the
+ // Vault docs, this is normally referred to as "aws", and Packer will
+ // default to "aws" if `engine_name` is not set.
+ // - `role_arn` (string)- The ARN of the role to assume if credential\_type
+ // on the Vault role is assumed\_role. Must match one of the allowed role
+ // ARNs in the Vault role. Optional if the Vault role only allows a single
+ // AWS role ARN; required otherwise.
+ // - `ttl` (string) - Specifies the TTL for the use of the STS token. This
+ // is specified as a string with a duration suffix. Valid only when
+ // credential\_type is assumed\_role or federation\_token. When not
+ // specified, the default\_sts\_ttl set for the role will be used. If that
+ // is also not set, then the default value of 3600s will be used. AWS
+ // places limits on the maximum TTL allowed. See the AWS documentation on
+ // the DurationSeconds parameter for AssumeRole (for assumed\_role
+ // credential types) and GetFederationToken (for federation\_token
+ // credential types) for more details.
+ //
+ // Example:
+	//     `json { "vault_aws_engine": { "name": "myrole", "role_arn": "myarn", "ttl": "3600s" } }`
+ VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
getEC2Connection func() ec2iface.EC2API
}
diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go
index 5aefb7a43..efd919424 100644
--- a/builder/amazon/common/ami_config.go
+++ b/builder/amazon/common/ami_config.go
@@ -13,106 +13,105 @@ import (
// AMIConfig is for common configuration related to creating AMIs.
type AMIConfig struct {
// The name of the resulting AMI that will appear when
- // managing AMIs in the AWS console or via APIs. This must be unique. To help
- // make this unique, use a function like timestamp (see template
- // engine for more info).
- AMIName string `mapstructure:"ami_name" required:"true"`
+ // managing AMIs in the AWS console or via APIs. This must be unique. To help
+ // make this unique, use a function like timestamp (see [template
+ // engine](../templates/engine.html) for more info).
+ AMIName string `mapstructure:"ami_name" required:"true"`
// The description to set for the resulting
- // AMI(s). By default this description is empty. This is a template
- // engine, see Build template
- // data for more information.
- AMIDescription string `mapstructure:"ami_description" required:"false"`
- // The type of virtualization for the AMI
- // you are building. This option is required to register HVM images. Can be
- // paravirtual (default) or hvm.
- AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
+ // AMI(s). By default this description is empty. This is a template
+ // engine, see Build template
+ // data for more information.
+ AMIDescription string `mapstructure:"ami_description" required:"false"`
+	// The type of virtualization for the AMI
+	// you are building. This option is required to register HVM images. Can be
+	// `paravirtual` (default) or `hvm`. See [AMI virtualization
+	// types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/virtualization_types.html).
+ AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
// A list of account IDs that have access to
- // launch the resulting AMI(s). By default no additional users other than the
- // user creating the AMI has permissions to launch it.
- AMIUsers []string `mapstructure:"ami_users" required:"false"`
+ // launch the resulting AMI(s). By default no additional users other than the
+ // user creating the AMI has permissions to launch it.
+ AMIUsers []string `mapstructure:"ami_users" required:"false"`
// A list of groups that have access to
- // launch the resulting AMI(s). By default no groups have permission to launch
- // the AMI. all will make the AMI publicly accessible.
- AMIGroups []string `mapstructure:"ami_groups" required:"false"`
+ // launch the resulting AMI(s). By default no groups have permission to launch
+ // the AMI. all will make the AMI publicly accessible.
+ AMIGroups []string `mapstructure:"ami_groups" required:"false"`
// A list of product codes to
- // associate with the AMI. By default no product codes are associated with the
- // AMI.
- AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
+ // associate with the AMI. By default no product codes are associated with the
+ // AMI.
+ AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
// A list of regions to copy the AMI to.
- // Tags and attributes are copied along with the AMI. AMI copying takes time
- // depending on the size of the AMI, but will generally take many minutes.
- AMIRegions []string `mapstructure:"ami_regions" required:"false"`
+ // Tags and attributes are copied along with the AMI. AMI copying takes time
+ // depending on the size of the AMI, but will generally take many minutes.
+ AMIRegions []string `mapstructure:"ami_regions" required:"false"`
// Set to true if you want to skip
- // validation of the ami_regions configuration option. Default false.
- AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // validation of the ami_regions configuration option. Default false.
+ AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
// Tags applied to the AMI. This is a
- // template engine, see Build template
- // data for more information.
- AMITags TagMap `mapstructure:"tags" required:"false"`
+ // [template engine](/docs/templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
+ AMITags TagMap `mapstructure:"tags" required:"false"`
// Enable enhanced networking (ENA but not
- // SriovNetSupport) on HVM-compatible AMIs. If set, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
- // disable enhanced networking in the final AMI as opposed to passing the
- // setting through unchanged from the source. Note: you must make sure
- // enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
+ // SriovNetSupport) on HVM-compatible AMIs. If set, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
+ // disable enhanced networking in the final AMI as opposed to passing the
+ // setting through unchanged from the source. Note: you must make sure
+ // enhanced networking is enabled on your instance. [Amazon's
+ // documentation on enabling enhanced
+ // networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+ AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
// Enable enhanced networking (SriovNetSupport but
- // not ENA) on HVM-compatible AMIs. If true, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
- // sure enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- // Default false.
- AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
+ // not ENA) on HVM-compatible AMIs. If true, add
+ // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
+ // sure enhanced networking is enabled on your instance. See Amazon's
+ // documentation on enabling enhanced
+ // networking.
+ // Default false.
+ AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
// Force Packer to first deregister an existing
- // AMI if one with the same name already exists. Default false.
- AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
+ // AMI if one with the same name already exists. Default false.
+ AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
// Force Packer to delete snapshots
- // associated with AMIs, which have been deregistered by force_deregister.
- // Default false.
- AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
+ // associated with AMIs, which have been deregistered by force_deregister.
+ // Default false.
+ AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
// Whether or not to encrypt the resulting AMI when
- // copying a provisioned instance to an AMI. By default, Packer will keep the
- // encryption setting to what it was in the source image. Setting false will
- // result in an unencrypted image, and true will result in an encrypted one.
- AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false"`
- // ID, alias or ARN of the KMS key to use for boot
- // volume encryption. This only applies to the main region, other regions
- // where the AMI will be copied will be encrypted by the default EBS KMS key.
- // For valid formats see KmsKeyId in the AWS API docs -
- // CopyImage.
- // This field is validated by Packer, when using an alias, you will have to
- // prefix kms_key_id with alias/.
- AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
- // a map of regions to copy the ami
- // to, along with the custom kms key id (alias or arn) to use for encryption
- // for that region. Keys must match the regions provided in ami_regions. If
- // you just want to encrypt using a default ID, you can stick with
- // kms_key_id and ami_regions. If you want a region to be encrypted with
- // that region's default key ID, you can use an empty string "" instead of a
- // key id in this map. (e.g. "us-east-1": "") However, you cannot use
- // default key IDs if you are using this in conjunction with snapshot_users
- // -- in that situation you must use custom keys. For valid formats see
- // KmsKeyId in the AWS API docs -
- // CopyImage.
- AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
+ // copying a provisioned instance to an AMI. By default, Packer will keep the
+ // encryption setting to what it was in the source image. Setting false will
+ // result in an unencrypted image, and true will result in an encrypted one.
+ AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false"`
+ // ID, alias or ARN of the KMS key to use for boot volume encryption. This
+ // only applies to the main `region`, other regions where the AMI will be
+ // copied will be encrypted by the default EBS KMS key. For valid formats
+ // see *KmsKeyId* in the [AWS API docs -
+ // CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+ // This field is validated by Packer, when using an alias, you will have to
+ // prefix `kms_key_id` with `alias/`.
+ AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
+ // a map of regions to copy the ami to, along with the custom kms key id
+ // (alias or arn) to use for encryption for that region. Keys must match
+ // the regions provided in ami_regions. If you just want to encrypt using a
+ // default ID, you can stick with kms_key_id and ami_regions. If you want a
+ // region to be encrypted with that region's default key ID, you can use an
+ // empty string "" instead of a key id in this map. (e.g. "us-east-1": "")
+ // However, you cannot use default key IDs if you are using this in
+ // conjunction with snapshot_users -- in that situation you must use custom
+ // keys. For valid formats see KmsKeyId in the AWS API docs - CopyImage.
+ AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
// Tags to apply to snapshot.
- // They will override AMI tags if already applied to snapshot. This is a
- // template engine, see Build template
- // data for more information.
- SnapshotTags TagMap `mapstructure:"snapshot_tags" required:"false"`
+ // They will override AMI tags if already applied to snapshot. This is a
+ // template engine, see Build template
+ // data for more information.
+ SnapshotTags TagMap `mapstructure:"snapshot_tags" required:"false"`
// A list of account IDs that have
- // access to create volumes from the snapshot(s). By default no additional
- // users other than the user creating the AMI has permissions to create
- // volumes from the backing snapshot(s).
- SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
+ // access to create volumes from the snapshot(s). By default no additional
+ // users other than the user creating the AMI has permissions to create
+ // volumes from the backing snapshot(s).
+ SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
// A list of groups that have access to
- // create volumes from the snapshot(s). By default no groups have permission
- // to create volumes from the snapshot(s). all will make the snapshot
- // publicly accessible.
- SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
+ // create volumes from the snapshot(s). By default no groups have permission
+ // to create volumes from the snapshot(s). all will make the snapshot
+ // publicly accessible.
+ SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
}
func stringInSlice(s []string, searchstr string) bool {
diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go
index 4504b3ac4..494a71e9c 100644
--- a/builder/amazon/common/block_device.go
+++ b/builder/amazon/common/block_device.go
@@ -14,51 +14,51 @@ import (
// BlockDevice
type BlockDevice struct {
// Indicates whether the EBS volume is
- // deleted on instance termination. Default false. NOTE: If this
- // value is not explicitly set to true and volumes are not cleaned up by
- // an alternative method, additional volumes will accumulate after every
- // build.
- DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
+ // deleted on instance termination. Default false. NOTE: If this
+ // value is not explicitly set to true and volumes are not cleaned up by
+ // an alternative method, additional volumes will accumulate after every
+ // build.
+ DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
// The device name exposed to the instance (for
- // example, /dev/sdh or xvdh). Required for every device in the block
- // device mapping.
- DeviceName string `mapstructure:"device_name" required:"false"`
+ // example, /dev/sdh or xvdh). Required for every device in the block
+ // device mapping.
+ DeviceName string `mapstructure:"device_name" required:"false"`
// Indicates whether or not to encrypt the volume.
- // By default, Packer will keep the encryption setting to what it was in
- // the source image. Setting false will result in an unencrypted device,
- // and true will result in an encrypted one.
- Encrypted *bool `mapstructure:"encrypted" required:"false"`
+ // By default, Packer will keep the encryption setting to what it was in
+ // the source image. Setting false will result in an unencrypted device,
+ // and true will result in an encrypted one.
+ Encrypted *bool `mapstructure:"encrypted" required:"false"`
// The number of I/O operations per second (IOPS) that
- // the volume supports. See the documentation on
- // IOPs
- // for more information
- IOPS int64 `mapstructure:"iops" required:"false"`
+ // the volume supports. See the documentation on
+ // IOPs
+ // for more information
+ IOPS int64 `mapstructure:"iops" required:"false"`
// Suppresses the specified device included in the
- // block device mapping of the AMI.
- NoDevice bool `mapstructure:"no_device" required:"false"`
+ // block device mapping of the AMI.
+ NoDevice bool `mapstructure:"no_device" required:"false"`
// The ID of the snapshot.
- SnapshotId string `mapstructure:"snapshot_id" required:"false"`
+ SnapshotId string `mapstructure:"snapshot_id" required:"false"`
// The virtual device name. See the
- // documentation on Block Device
- // Mapping
- // for more information.
- VirtualName string `mapstructure:"virtual_name" required:"false"`
+ // documentation on Block Device
+ // Mapping
+ // for more information.
+ VirtualName string `mapstructure:"virtual_name" required:"false"`
// The volume type. gp2 for General Purpose
- // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
- // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
- // Magnetic volumes.
- VolumeType string `mapstructure:"volume_type" required:"false"`
+ // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ // Magnetic volumes.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
// The size of the volume, in GiB. Required if
- // not specifying a snapshot_id.
- VolumeSize int64 `mapstructure:"volume_size" required:"false"`
+ // not specifying a snapshot_id.
+ VolumeSize int64 `mapstructure:"volume_size" required:"false"`
// ID, alias or ARN of the KMS key to use for boot
- // volume encryption. This only applies to the main region, other regions
- // where the AMI will be copied will be encrypted by the default EBS KMS key.
- // For valid formats see KmsKeyId in the AWS API docs -
- // CopyImage.
- // This field is validated by Packer, when using an alias, you will have to
- // prefix kms_key_id with alias/.
- KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
+ // volume encryption. This only applies to the main region, other regions
+ // where the AMI will be copied will be encrypted by the default EBS KMS key.
+ // For valid formats see KmsKeyId in the [AWS API docs -
+	// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+ // This field is validated by Packer, when using an alias, you will have to
+ // prefix kms_key_id with alias/.
+ KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
// ebssurrogate only
OmitFromArtifact bool `mapstructure:"omit_from_artifact"`
}
@@ -70,26 +70,26 @@ type BlockDevices struct {
type AMIBlockDevices struct {
// Add one or
- // more block device
- // mappings
- // to the AMI. These will be attached when booting a new instance from your
- // AMI. If this field is populated, and you are building from an existing source image,
- // the block device mappings in the source image will be overwritten. This means you
- // must have a block device mapping entry for your root volume, root_volume_size,
- // and root_device_name. `Your options here may vary depending on the type of VM
- // you use. The block device mappings allow for the following configuration:
+ // more block device
+ // mappings
+ // to the AMI. These will be attached when booting a new instance from your
+ // AMI. If this field is populated, and you are building from an existing source image,
+ // the block device mappings in the source image will be overwritten. This means you
+ // must have a block device mapping entry for your root volume, root_volume_size,
+	// and root_device_name. Your options here may vary depending on the type of VM
+ // you use. The block device mappings allow for the following configuration:
AMIMappings []BlockDevice `mapstructure:"ami_block_device_mappings" required:"false"`
}
type LaunchBlockDevices struct {
// Add one
- // or more block devices before the Packer build starts. If you add instance
- // store volumes or EBS volumes in addition to the root device volume, the
- // created AMI will contain block device mapping information for those
- // volumes. Amazon creates snapshots of the source instance's root volume and
- // any other EBS volumes described here. When you launch an instance from this
- // new AMI, the instance automatically launches with these additional volumes,
- // and will restore them from snapshots taken from the source instance.
+ // or more block devices before the Packer build starts. If you add instance
+ // store volumes or EBS volumes in addition to the root device volume, the
+ // created AMI will contain block device mapping information for those
+ // volumes. Amazon creates snapshots of the source instance's root volume and
+ // any other EBS volumes described here. When you launch an instance from this
+ // new AMI, the instance automatically launches with these additional volumes,
+ // and will restore them from snapshots taken from the source instance.
LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings" required:"false"`
}
diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index f4e6ae56e..63da11d33 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -61,147 +61,176 @@ func (d *SecurityGroupFilterOptions) Empty() bool {
// AMI and details on how to access that launched image.
type RunConfig struct {
// If using a non-default VPC,
- // public IP addresses are not provided by default. If this is true, your
- // new instance will get a Public IP. default: false
- AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
+ // public IP addresses are not provided by default. If this is true, your
+ // new instance will get a Public IP. default: false
+ AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
// Destination availability zone to launch
- // instance in. Leave this empty to allow Amazon to auto-assign.
- AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
+ // instance in. Leave this empty to allow Amazon to auto-assign.
+ AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
// Requires spot_price to be set. The
- // required duration for the Spot Instances (also known as Spot blocks). This
- // value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
- // specify an Availability Zone group or a launch group if you specify a
- // duration.
- BlockDurationMinutes int64 `mapstructure:"block_duration_minutes" required:"false"`
- // Packer normally stops the build
- // instance after all provisioners have run. For Windows instances, it is
- // sometimes desirable to run
- // Sysprep
- // which will stop the instance for you. If this is set to true, Packer
- // will not stop the instance but will assume that you will send the stop
- // signal yourself through your final provisioner. You can do this with a
- // windows-shell
- // provisioner.
- DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
- // Mark instance as EBS
- // Optimized.
- // Default false.
- EbsOptimized bool `mapstructure:"ebs_optimized" required:"false"`
- // Enabling T2 Unlimited allows the source
- // instance to burst additional CPU beyond its available CPU
- // Credits
- // for as long as the demand exists. This is in contrast to the standard
- // configuration that only allows an instance to consume up to its available
- // CPU Credits. See the AWS documentation for T2
- // Unlimited
- // and the T2 Unlimited Pricing section of the Amazon EC2 On-Demand
- // Pricing document for more
- // information. By default this option is disabled and Packer will set up a
- // T2
- // Standard
- // instance instead.
- EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
+ // required duration for the Spot Instances (also known as Spot blocks). This
+ // value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
+ // specify an Availability Zone group or a launch group if you specify a
+ // duration.
+ BlockDurationMinutes int64 `mapstructure:"block_duration_minutes" required:"false"`
+ // Packer normally stops the build instance after all provisioners have
+ // run. For Windows instances, it is sometimes desirable to [run
+ // Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
+ // which will stop the instance for you. If this is set to `true`, Packer
+ // *will not* stop the instance but will assume that you will send the stop
+ // signal yourself through your final provisioner. You can do this with a
+ // [windows-shell
+ // provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
+ // Note that Packer will still wait for the instance to be stopped, and
+ // failing to send the stop signal yourself, when you have set this flag to
+ // `true`, will cause a timeout.
+ // Example of a valid shutdown command:
+ //
+ // ``` json
+ // {
+ // "type": "windows-shell",
+ // "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
+ // }
+ // ```
+ DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
+ // Mark instance as [EBS
+ // Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ // Default `false`.
+ EbsOptimized bool `mapstructure:"ebs_optimized" required:"false"`
+ // Enabling T2 Unlimited allows the source instance to burst additional CPU
+ // beyond its available [CPU
+ // Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+ // for as long as the demand exists. This is in contrast to the standard
+ // configuration that only allows an instance to consume up to its
+ // available CPU Credits. See the AWS documentation for [T2
+ // Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+ // and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
+ // Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for
+ // more information. By default this option is disabled and Packer will set
+ // up a [T2
+ // Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+ // instance instead.
+ //
+ // To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
+ // Additionally, T2 Unlimited cannot be used in conjunction with Spot
+ // Instances, e.g. when the `spot_price` option has been configured.
+ // Attempting to do so will cause an error.
+ //
+ // !> **Warning!** Additional costs may be incurred by enabling T2
+ // Unlimited - even for instances that would usually qualify for the
+ // [AWS Free Tier](https://aws.amazon.com/free/).
+ EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
// The name of an IAM instance
- // profile
- // to launch the EC2 instance with.
- IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
+ // profile
+ // to launch the EC2 instance with.
+ IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
// Automatically terminate instances on
- // shutdown in case Packer exits ungracefully. Possible values are stop and
- // terminate. Defaults to stop.
- InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior" required:"false"`
+ // shutdown in case Packer exits ungracefully. Possible values are stop and
+ // terminate. Defaults to stop.
+ InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior" required:"false"`
// The EC2 instance type to use while building the
- // AMI, such as t2.small.
- InstanceType string `mapstructure:"instance_type" required:"true"`
+ // AMI, such as t2.small.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
// Filters used to populate the
- // security_group_ids field. Example:
- SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
+ // `security_group_ids` field. Example:
+ //
+ // ``` json
+ // {
+ // "security_group_filter": {
+ // "filters": {
+ // "tag:Class": "packer"
+ // }
+ // }
+ // }
+ // ```
+ SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
// Tags to apply to the instance
- // that is launched to create the AMI. These tags are not applied to the
- // resulting AMI unless they're duplicated in tags. This is a template
- // engine, see Build template
- // data for more information.
- RunTags map[string]string `mapstructure:"run_tags" required:"false"`
+ // that is launched to create the AMI. These tags are not applied to the
+ // resulting AMI unless they're duplicated in tags. This is a template
+ // engine, see Build template
+ // data for more information.
+ RunTags map[string]string `mapstructure:"run_tags" required:"false"`
// The ID (not the name) of the security
- // group to assign to the instance. By default this is not set and Packer will
- // automatically create a new temporary security group to allow SSH access.
- // Note that if this is specified, you must be sure the security group allows
- // access to the ssh_port given below.
- SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ // group to assign to the instance. By default this is not set and Packer will
+ // automatically create a new temporary security group to allow SSH access.
+ // Note that if this is specified, you must be sure the security group allows
+ // access to the ssh_port given below.
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
// A list of security groups as
- // described above. Note that if this is specified, you must omit the
- // security_group_id.
- SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"`
+ // described above. Note that if this is specified, you must omit the
+ // security_group_id.
+ SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"`
// The source AMI whose root volume will be copied and
- // provisioned on the currently running instance. This must be an EBS-backed
- // AMI with a root volume snapshot that you have access to. Note: this is not
- // used when from_scratch is set to true.
- SourceAmi string `mapstructure:"source_ami" required:"true"`
+ // provisioned on the currently running instance. This must be an EBS-backed
+ // AMI with a root volume snapshot that you have access to. Note: this is not
+ // used when from_scratch is set to true.
+ SourceAmi string `mapstructure:"source_ami" required:"true"`
// Filters used to populate the source_ami
- // field. Example:
- SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
+ // field. Example:
+ SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
// a list of acceptable instance
- // types to run your build on. We will request a spot instance using the max
- // price of spot_price and the allocation strategy of "lowest price".
- // Your instance will be launched on an instance type of the lowest available
- // price that you have in your list. This is used in place of instance_type.
- // You may only set either spot_instance_types or instance_type, not both.
- // This feature exists to help prevent situations where a Packer build fails
- // because a particular availability zone does not have capacity for the
- // specific instance_type requested in instance_type.
- SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false"`
+ // types to run your build on. We will request a spot instance using the max
+ // price of spot_price and the allocation strategy of "lowest price".
+ // Your instance will be launched on an instance type of the lowest available
+ // price that you have in your list. This is used in place of instance_type.
+ // You may only set either spot_instance_types or instance_type, not both.
+ // This feature exists to help prevent situations where a Packer build fails
+ // because a particular availability zone does not have capacity for the
+ // specific instance_type requested in instance_type.
+ SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false"`
// The maximum hourly price to pay for a spot instance
- // to create the AMI. Spot instances are a type of instance that EC2 starts
- // when the current spot price is less than the maximum price you specify.
- // Spot price will be updated based on available spot instance capacity and
- // current spot instance requests. It may save you some costs. You can set
- // this to auto for Packer to automatically discover the best spot price or
- // to "0" to use an on demand instance (default).
- SpotPrice string `mapstructure:"spot_price" required:"false"`
+ // to create the AMI. Spot instances are a type of instance that EC2 starts
+ // when the current spot price is less than the maximum price you specify.
+ // Spot price will be updated based on available spot instance capacity and
+ // current spot instance requests. It may save you some costs. You can set
+ // this to auto for Packer to automatically discover the best spot price or
+ // to "0" to use an on demand instance (default).
+ SpotPrice string `mapstructure:"spot_price" required:"false"`
// Required if spot_price is set to
- // auto. This tells Packer what sort of AMI you're launching to find the
- // best spot price. This must be one of: Linux/UNIX, SUSE Linux,
- // Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
- // Windows (Amazon VPC)
- SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product" required:"false"`
+ // auto. This tells Packer what sort of AMI you're launching to find the
+ // best spot price. This must be one of: Linux/UNIX, SUSE Linux,
+ // Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
+ // Windows (Amazon VPC)
+ SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product" required:"false"`
// Requires spot_price to be
- // set. This tells Packer to apply tags to the spot request that is issued.
- SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
+ // set. This tells Packer to apply tags to the spot request that is issued.
+ SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
// Filters used to populate the subnet_id field.
- // Example:
- SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
+ // Example:
+ SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
// If using VPC, the ID of the subnet, such as
- // subnet-12345def, where Packer will launch the EC2 instance. This field is
- // required if you are using an non-default VPC.
- SubnetId string `mapstructure:"subnet_id" required:"false"`
+ // subnet-12345def, where Packer will launch the EC2 instance. This field is
+	// required if you are using a non-default VPC.
+ SubnetId string `mapstructure:"subnet_id" required:"false"`
// The name of the temporary key pair to
- // generate. By default, Packer generates a name that looks like
- // packer_, where is a 36 character unique identifier.
- TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" required:"false"`
+ // generate. By default, Packer generates a name that looks like
+	// packer_<UUID>, where <UUID> is a 36 character unique identifier.
+ TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" required:"false"`
// A list of IPv4
- // CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
- TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
+ // CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
+ TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
// User data to apply when launching the instance. Note
- // that you need to be careful about escaping characters due to the templates
- // being JSON. It is often more convenient to use user_data_file, instead.
- // Packer will not automatically wait for a user script to finish before
- // shutting down the instance this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the instance.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// Filters used to populate the vpc_id field.
- // vpc_id take precedence over this.
- // Example:
- VpcFilter VpcFilterOptions `mapstructure:"vpc_filter" required:"false"`
+ // vpc_id take precedence over this.
+ // Example:
+ VpcFilter VpcFilterOptions `mapstructure:"vpc_filter" required:"false"`
// If launching into a VPC subnet, Packer needs the VPC ID
- // in order to create a temporary security group within the VPC. Requires
- // subnet_id to be set. If this field is left blank, Packer will try to get
- // the VPC ID from the subnet_id.
- VpcId string `mapstructure:"vpc_id" required:"false"`
+ // in order to create a temporary security group within the VPC. Requires
+ // subnet_id to be set. If this field is left blank, Packer will try to get
+ // the VPC ID from the subnet_id.
+ VpcId string `mapstructure:"vpc_id" required:"false"`
// The timeout for waiting for a Windows
- // password for Windows instances. Defaults to 20 minutes. Example value:
- // 10m
- WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
+ // password for Windows instances. Defaults to 20 minutes. Example value:
+ // 10m
+ WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
From 8488176dae56cb99bad609f2cef378b46c6bd3e0 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 5 Jun 2019 16:46:33 +0200
Subject: [PATCH 23/97] vagrant/builder.Config rewrap docs
---
builder/vagrant/builder.go | 63 ++++++++++++++++++--------------------
1 file changed, 30 insertions(+), 33 deletions(-)
diff --git a/builder/vagrant/builder.go b/builder/vagrant/builder.go
index be07796f1..1da055155 100644
--- a/builder/vagrant/builder.go
+++ b/builder/vagrant/builder.go
@@ -38,38 +38,36 @@ type Config struct {
common.FloppyConfig `mapstructure:",squash"`
bootcommand.BootConfig `mapstructure:",squash"`
SSHConfig `mapstructure:",squash"`
- // The directory to create that will contain
- // your output box. We always create this directory and run from inside of it to
- // prevent Vagrant init collisions. If unset, it will be set to packer- plus
- // your buildname.
+ // The directory to create that will contain your output box. We always
+ // create this directory and run from inside of it to prevent Vagrant init
+ // collisions. If unset, it will be set to packer- plus your buildname.
OutputDir string `mapstructure:"output_dir" required:"false"`
- // URL of the vagrant box to use, or the name of the
- // vagrant box. hashicorp/precise64, ./mylocalbox.box and
- // https://example.com/my-box.box are all valid source boxes. If your
- // source is a .box file, whether locally or from a URL like the latter example
- // above, you will also need to provide a box_name. This option is required,
- // unless you set global_id. You may only set one or the other, not both.
+ // URL of the vagrant box to use, or the name of the vagrant box.
+ // hashicorp/precise64, ./mylocalbox.box and https://example.com/my-box.box
+ // are all valid source boxes. If your source is a .box file, whether
+ // locally or from a URL like the latter example above, you will also need
+ // to provide a box_name. This option is required, unless you set
+ // global_id. You may only set one or the other, not both.
SourceBox string `mapstructure:"source_path" required:"true"`
- // the global id of a Vagrant box already added to Vagrant
- // on your system. You can find the global id of your Vagrant boxes using the
- // command vagrant global-status; your global_id will be a 7-digit number and
+ // the global id of a Vagrant box already added to Vagrant on your system.
+ // You can find the global id of your Vagrant boxes using the command
+ // vagrant global-status; your global_id will be a 7-digit number and
// letter comination that you'll find in the leftmost column of the
// global-status output. If you choose to use global_id instead of
// source_box, Packer will skip the Vagrant initialize and add steps, and
// simply launch the box directly using the global id.
GlobalID string `mapstructure:"global_id" required:"true"`
- // The checksum for the .box file. The type of the
- // checksum is specified with checksum_type, documented below.
+ // The checksum for the .box file. The type of the checksum is specified
+ // with checksum_type, documented below.
Checksum string `mapstructure:"checksum" required:"false"`
- // The type of the checksum specified in checksum.
- // Valid values are none, md5, sha1, sha256, or sha512. Although the
- // checksum will not be verified when checksum_type is set to "none", this is
- // not recommended since OVA files can be very large and corruption does happen
- // from time to time.
+ // The type of the checksum specified in checksum. Valid values are none,
+ // md5, sha1, sha256, or sha512. Although the checksum will not be verified
+ // when checksum_type is set to "none", this is not recommended since OVA
+ // files can be very large and corruption does happen from time to time.
ChecksumType string `mapstructure:"checksum_type" required:"false"`
- // if your source_box is a boxfile that we need to add
- // to Vagrant, this is the name to give it. If left blank, will default to
- // "packer_" plus your buildname.
+ // if your source_box is a boxfile that we need to add to Vagrant, this is
+ // the name to give it. If left blank, will default to "packer_" plus your
+ // buildname.
BoxName string `mapstructure:"box_name" required:"false"`
// The vagrant provider.
// This parameter is required when source_path have more than one provider,
@@ -82,22 +80,21 @@ type Config struct {
// What vagrantfile to use
VagrantfileTpl string `mapstructure:"vagrantfile_template"`
- // Whether to halt, suspend, or destroy the box when
- // the build has completed. Defaults to "halt"
+ // Whether to halt, suspend, or destroy the box when the build has
+ // completed. Defaults to "halt"
TeardownMethod string `mapstructure:"teardown_method" required:"false"`
// What box version to use when initializing Vagrant.
BoxVersion string `mapstructure:"box_version" required:"false"`
- // a path to a golang template for a
- // vagrantfile. Our default template can be found
- // here. So far the only template variables available to you are {{ .BoxName }} and
- // {{ .SyncedFolder }}, which correspond to the Packer options box_name and
- // synced_folder.
+ // a path to a golang template for a vagrantfile. Our default template can
+ // be found here. So far the only template variables available to you are
+ // {{ .BoxName }} and {{ .SyncedFolder }}, which correspond to the Packer
+ // options box_name and synced_folder.
Template string `mapstructure:"template" required:"false"`
SyncedFolder string `mapstructure:"synced_folder"`
- // Don't call "vagrant add" to add the box to your local
- // environment; this is necessary if you want to launch a box that is already
- // added to your vagrant environment.
+ // Don't call "vagrant add" to add the box to your local environment; this
+ // is necessary if you want to launch a box that is already added to your
+ // vagrant environment.
SkipAdd bool `mapstructure:"skip_add" required:"false"`
// Equivalent to setting the
// --cacert
From 43996843729a9585cf30f2b26b916dc895047d70 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:29:25 +0200
Subject: [PATCH 24/97] make fmt autogenerated docs
---
builder/alicloud/ecs/access_config.go | 26 +-
builder/alicloud/ecs/image_config.go | 158 ++++++------
builder/alicloud/ecs/run_config.go | 136 +++++-----
builder/amazon/common/run_config.go | 16 +-
builder/amazon/ebssurrogate/builder.go | 8 +-
.../amazon/ebssurrogate/root_block_device.go | 38 +--
builder/amazon/ebsvolume/block_device.go | 6 +-
builder/amazon/instance/builder.go | 56 ++---
builder/azure/arm/clientconfig.go | 12 +-
builder/azure/arm/config.go | 232 +++++++++---------
builder/cloudstack/config.go | 178 +++++++-------
builder/digitalocean/config.go | 76 +++---
builder/docker/config.go | 98 ++++----
builder/docker/ecr_login.go | 24 +-
builder/googlecompute/config.go | 188 +++++++-------
builder/hyperone/config.go | 70 +++---
builder/hyperv/common/output_config.go | 12 +-
builder/hyperv/common/shutdown_config.go | 22 +-
builder/hyperv/iso/builder.go | 168 ++++++-------
builder/hyperv/vmcx/builder.go | 146 +++++------
builder/lxc/config.go | 70 +++---
builder/lxd/config.go | 36 +--
builder/ncloud/config.go | 48 ++--
builder/openstack/access_config.go | 106 ++++----
builder/openstack/image_config.go | 22 +-
builder/openstack/run_config.go | 154 ++++++------
builder/parallels/common/hw_config.go | 12 +-
builder/parallels/common/output_config.go | 10 +-
builder/parallels/common/prlctl_config.go | 18 +-
.../parallels/common/prlctl_post_config.go | 4 +-
.../parallels/common/prlctl_version_config.go | 8 +-
builder/parallels/common/shutdown_config.go | 12 +-
builder/parallels/common/tools_config.go | 32 +--
builder/parallels/iso/builder.go | 62 ++---
builder/parallels/pvm/config.go | 26 +-
builder/qemu/builder.go | 210 ++++++++--------
builder/scaleway/config.go | 58 ++---
builder/tencentcloud/cvm/access_config.go | 22 +-
builder/tencentcloud/cvm/image_config.go | 28 +--
builder/tencentcloud/cvm/run_config.go | 60 ++---
builder/triton/access_config.go | 40 +--
builder/triton/source_machine_config.go | 92 +++----
builder/triton/target_image_config.go | 36 +--
builder/virtualbox/common/export_config.go | 2 +-
builder/virtualbox/common/export_opts.go | 8 +-
.../common/guest_additions_config.go | 14 +-
builder/virtualbox/common/hw_config.go | 14 +-
builder/virtualbox/common/output_config.go | 10 +-
builder/virtualbox/common/run_config.go | 20 +-
builder/virtualbox/common/shutdown_config.go | 28 +--
builder/virtualbox/common/ssh_config.go | 16 +-
.../virtualbox/common/vbox_version_config.go | 12 +-
.../virtualbox/common/vboxbundle_config.go | 6 +-
.../virtualbox/common/vboxmanage_config.go | 18 +-
.../common/vboxmanage_post_config.go | 4 +-
builder/virtualbox/iso/builder.go | 118 ++++-----
builder/virtualbox/ovf/config.go | 116 ++++-----
builder/vmware/common/driver_config.go | 50 ++--
builder/vmware/common/export_config.go | 66 ++---
builder/vmware/common/hw_config.go | 46 ++--
builder/vmware/common/output_config.go | 10 +-
builder/vmware/common/run_config.go | 36 +--
builder/vmware/common/shutdown_config.go | 12 +-
builder/vmware/common/tools_config.go | 18 +-
builder/vmware/common/vmx_config.go | 34 +--
builder/vmware/iso/config.go | 96 ++++----
builder/vmware/vmx/config.go | 20 +-
builder/yandex/config.go | 98 ++++----
68 files changed, 1858 insertions(+), 1850 deletions(-)
diff --git a/builder/alicloud/ecs/access_config.go b/builder/alicloud/ecs/access_config.go
index 0d74409e2..1761183be 100644
--- a/builder/alicloud/ecs/access_config.go
+++ b/builder/alicloud/ecs/access_config.go
@@ -15,23 +15,23 @@ import (
// Config of alicloud
type AlicloudAccessConfig struct {
// This is the Alicloud access key. It must be
- // provided, but it can also be sourced from the ALICLOUD_ACCESS_KEY
- // environment variable.
- AlicloudAccessKey string `mapstructure:"access_key" required:"true"`
+ // provided, but it can also be sourced from the ALICLOUD_ACCESS_KEY
+ // environment variable.
+ AlicloudAccessKey string `mapstructure:"access_key" required:"true"`
// This is the Alicloud secret key. It must be
- // provided, but it can also be sourced from the ALICLOUD_SECRET_KEY
- // environment variable.
- AlicloudSecretKey string `mapstructure:"secret_key" required:"true"`
+ // provided, but it can also be sourced from the ALICLOUD_SECRET_KEY
+ // environment variable.
+ AlicloudSecretKey string `mapstructure:"secret_key" required:"true"`
// This is the Alicloud region. It must be provided, but
- // it can also be sourced from the ALICLOUD_REGION environment variables.
- AlicloudRegion string `mapstructure:"region" required:"true"`
+ // it can also be sourced from the ALICLOUD_REGION environment variables.
+ AlicloudRegion string `mapstructure:"region" required:"true"`
// The region validation can be skipped
- // if this value is true, the default value is false.
- AlicloudSkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // if this value is true, the default value is false.
+ AlicloudSkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
// STS access token, can be set through template
- // or by exporting as environment variable such as
- // export SecurityToken=value.
- SecurityToken string `mapstructure:"security_token" required:"false"`
+ // or by exporting as environment variable such as
+ // export SecurityToken=value.
+ SecurityToken string `mapstructure:"security_token" required:"false"`
client *ClientWrapper
}
diff --git a/builder/alicloud/ecs/image_config.go b/builder/alicloud/ecs/image_config.go
index 1e4ff6cbc..e93b05220 100644
--- a/builder/alicloud/ecs/image_config.go
+++ b/builder/alicloud/ecs/image_config.go
@@ -12,111 +12,111 @@ import (
type AlicloudDiskDevice struct {
// The value of disk name is blank by default. [2,
- // 128] English or Chinese characters, must begin with an
- // uppercase/lowercase letter or Chinese character. Can contain numbers,
- // ., _ and -. The disk name will appear on the console. It cannot
- // begin with http:// or https://.
- DiskName string `mapstructure:"disk_name" required:"false"`
+ // 128] English or Chinese characters, must begin with an
+ // uppercase/lowercase letter or Chinese character. Can contain numbers,
+ // ., _ and -. The disk name will appear on the console. It cannot
+ // begin with http:// or https://.
+ DiskName string `mapstructure:"disk_name" required:"false"`
// Category of the system disk. Optional values
- // are:
- // - cloud - general cloud disk
- // - cloud_efficiency - efficiency cloud disk
- // - cloud_ssd - cloud SSD
- DiskCategory string `mapstructure:"disk_category" required:"false"`
+ // are:
+ // - cloud - general cloud disk
+ // - cloud_efficiency - efficiency cloud disk
+ // - cloud_ssd - cloud SSD
+ DiskCategory string `mapstructure:"disk_category" required:"false"`
// Size of the system disk, measured in GiB. Value
- // range: [20, 500]. The specified value must be equal to or greater
- // than max{20, ImageSize}. Default value: max{40, ImageSize}.
- DiskSize int `mapstructure:"disk_size" required:"false"`
+ // range: [20, 500]. The specified value must be equal to or greater
+ // than max{20, ImageSize}. Default value: max{40, ImageSize}.
+ DiskSize int `mapstructure:"disk_size" required:"false"`
// Snapshots are used to create the data
- // disk After this parameter is specified, Size is ignored. The actual
- // size of the created disk is the size of the specified snapshot.
- SnapshotId string `mapstructure:"disk_snapshot_id" required:"false"`
+ // disk After this parameter is specified, Size is ignored. The actual
+ // size of the created disk is the size of the specified snapshot.
+ SnapshotId string `mapstructure:"disk_snapshot_id" required:"false"`
// The value of disk description is blank by
- // default. [2, 256] characters. The disk description will appear on the
- // console. It cannot begin with http:// or https://.
- Description string `mapstructure:"disk_description" required:"false"`
+ // default. [2, 256] characters. The disk description will appear on the
+ // console. It cannot begin with http:// or https://.
+ Description string `mapstructure:"disk_description" required:"false"`
// Whether or not the disk is
- // released along with the instance:
- DeleteWithInstance bool `mapstructure:"disk_delete_with_instance" required:"false"`
+ // released along with the instance:
+ DeleteWithInstance bool `mapstructure:"disk_delete_with_instance" required:"false"`
// Device information of the related instance:
- // such as /dev/xvdb It is null unless the Status is In_use.
- Device string `mapstructure:"disk_device" required:"false"`
- // Whether or not to encrypt the data disk.
- // If this option is set to true, the data disk will be encryped and corresponding snapshot in the target image will also be encrypted. By
- // default, if this is an extra data disk, Packer will not encrypt the
- // data disk. Otherwise, Packer will keep the encryption setting to what
- // it was in the source image. Please refer to Introduction of ECS disk encryption
- // for more details.
- Encrypted *bool `mapstructure:"disk_encrypted" required:"false"`
+ // such as /dev/xvdb It is null unless the Status is In_use.
+ Device string `mapstructure:"disk_device" required:"false"`
+ // Whether or not to encrypt the data disk.
+	// If this option is set to true, the data disk will be encrypted and corresponding snapshot in the target image will also be encrypted. By
+ // default, if this is an extra data disk, Packer will not encrypt the
+ // data disk. Otherwise, Packer will keep the encryption setting to what
+ // it was in the source image. Please refer to Introduction of ECS disk encryption
+ // for more details.
+ Encrypted *bool `mapstructure:"disk_encrypted" required:"false"`
}
type AlicloudDiskDevices struct {
// Image disk mapping for system
- // disk.
- ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false"`
+ // disk.
+ ECSSystemDiskMapping AlicloudDiskDevice `mapstructure:"system_disk_mapping" required:"false"`
// Add one or more data
- // disks to the image.
+ // disks to the image.
ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings" required:"false"`
}
type AlicloudImageConfig struct {
// The name of the user-defined image, [2, 128]
- // English or Chinese characters. It must begin with an uppercase/lowercase
- // letter or a Chinese character, and may contain numbers, _ or -. It
- // cannot begin with http:// or https://.
- AlicloudImageName string `mapstructure:"image_name" required:"true"`
+ // English or Chinese characters. It must begin with an uppercase/lowercase
+ // letter or a Chinese character, and may contain numbers, _ or -. It
+ // cannot begin with http:// or https://.
+ AlicloudImageName string `mapstructure:"image_name" required:"true"`
// The version number of the image, with a length
- // limit of 1 to 40 English characters.
- AlicloudImageVersion string `mapstructure:"image_version" required:"false"`
+ // limit of 1 to 40 English characters.
+ AlicloudImageVersion string `mapstructure:"image_version" required:"false"`
// The description of the image, with a length
- // limit of 0 to 256 characters. Leaving it blank means null, which is the
- // default value. It cannot begin with http:// or https://.
- AlicloudImageDescription string `mapstructure:"image_description" required:"false"`
+ // limit of 0 to 256 characters. Leaving it blank means null, which is the
+ // default value. It cannot begin with http:// or https://.
+ AlicloudImageDescription string `mapstructure:"image_description" required:"false"`
// The IDs of to-be-added Aliyun
- // accounts to which the image is shared. The number of accounts is 1 to 10.
- // If number of accounts is greater than 10, this parameter is ignored.
- AlicloudImageShareAccounts []string `mapstructure:"image_share_account" required:"false"`
- AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
+ // accounts to which the image is shared. The number of accounts is 1 to 10.
+ // If number of accounts is greater than 10, this parameter is ignored.
+ AlicloudImageShareAccounts []string `mapstructure:"image_share_account" required:"false"`
+ AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
// Copy to the destination regionIds.
- AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false"`
+ AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions" required:"false"`
// The name of the destination image,
- // [2, 128] English or Chinese characters. It must begin with an
- // uppercase/lowercase letter or a Chinese character, and may contain numbers,
- // _ or -. It cannot begin with http:// or https://.
- AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false"`
- // Whether or not to encrypt the target images, including those copied if image_copy_regions is specified. If this option
- // is set to true, a temporary image will be created from the provisioned
- // instance in the main region and an encrypted copy will be generated in the
- // same region. By default, Packer will keep the encryption setting to what
- // it was in the source image.
- ImageEncrypted *bool `mapstructure:"image_encrypted" required:"false"`
+ // [2, 128] English or Chinese characters. It must begin with an
+ // uppercase/lowercase letter or a Chinese character, and may contain numbers,
+ // _ or -. It cannot begin with http:// or https://.
+ AlicloudImageDestinationNames []string `mapstructure:"image_copy_names" required:"false"`
+ // Whether or not to encrypt the target images, including those copied if image_copy_regions is specified. If this option
+ // is set to true, a temporary image will be created from the provisioned
+ // instance in the main region and an encrypted copy will be generated in the
+ // same region. By default, Packer will keep the encryption setting to what
+ // it was in the source image.
+ ImageEncrypted *bool `mapstructure:"image_encrypted" required:"false"`
// If this value is true, when the target
- // image names including those copied are duplicated with existing images, it
- // will delete the existing images and then create the target images,
- // otherwise, the creation will fail. The default value is false. Check
- // image_name and image_copy_names options for names of target images. If
- // -force option is
- // provided in build command, this option can be omitted and taken as true.
- AlicloudImageForceDelete bool `mapstructure:"image_force_delete" required:"false"`
+ // image names including those copied are duplicated with existing images, it
+ // will delete the existing images and then create the target images,
+ // otherwise, the creation will fail. The default value is false. Check
+ // image_name and image_copy_names options for names of target images. If
+ // -force option is
+ // provided in build command, this option can be omitted and taken as true.
+ AlicloudImageForceDelete bool `mapstructure:"image_force_delete" required:"false"`
// If this value is true, when
- // delete the duplicated existing images, the source snapshots of those images
- // will be delete either. If
- // -force option is
- // provided in build command, this option can be omitted and taken as true.
- AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots" required:"false"`
- AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
+ // delete the duplicated existing images, the source snapshots of those images
+	// will be deleted as well. If
+ // -force option is
+ // provided in build command, this option can be omitted and taken as true.
+ AlicloudImageForceDeleteSnapshots bool `mapstructure:"image_force_delete_snapshots" required:"false"`
+ AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
// If this value is true, the image
- // created will not include any snapshot of data disks. This option would be
- // useful for any circumstance that default data disks with instance types are
- // not concerned. The default value is false.
- AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks" required:"false"`
+ // created will not include any snapshot of data disks. This option would be
+ // useful for any circumstance that default data disks with instance types are
+ // not concerned. The default value is false.
+ AlicloudImageIgnoreDataDisks bool `mapstructure:"image_ignore_data_disks" required:"false"`
// The region validation can be skipped
- // if this value is true, the default value is false.
- AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ // if this value is true, the default value is false.
+ AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
// Tags applied to the destination
- // image and relevant snapshots.
- AlicloudImageTags map[string]string `mapstructure:"tags" required:"false"`
- AlicloudDiskDevices `mapstructure:",squash"`
+ // image and relevant snapshots.
+ AlicloudImageTags map[string]string `mapstructure:"tags" required:"false"`
+ AlicloudDiskDevices `mapstructure:",squash"`
}
func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/alicloud/ecs/run_config.go b/builder/alicloud/ecs/run_config.go
index 070e2557e..917e8b319 100644
--- a/builder/alicloud/ecs/run_config.go
+++ b/builder/alicloud/ecs/run_config.go
@@ -14,96 +14,96 @@ import (
)
type RunConfig struct {
- AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
+ AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
// ID of the zone to which the disk belongs.
- ZoneId string `mapstructure:"zone_id" required:"false"`
+ ZoneId string `mapstructure:"zone_id" required:"false"`
// Whether an ECS instance is I/O optimized or not.
- // The default value is false.
- IOOptimized bool `mapstructure:"io_optimized" required:"false"`
+ // The default value is false.
+ IOOptimized bool `mapstructure:"io_optimized" required:"false"`
// Type of the instance. For values, see Instance
- // Type
- // Table.
- // You can also obtain the latest instance type table by invoking the
- // Querying Instance Type
- // Table
- // interface.
- InstanceType string `mapstructure:"instance_type" required:"true"`
- Description string `mapstructure:"description"`
+ // Type
+ // Table.
+ // You can also obtain the latest instance type table by invoking the
+ // Querying Instance Type
+ // Table
+ // interface.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
+ Description string `mapstructure:"description"`
// This is the base image id which you want to
- // create your customized images.
- AlicloudSourceImage string `mapstructure:"source_image" required:"true"`
+ // create your customized images.
+ AlicloudSourceImage string `mapstructure:"source_image" required:"true"`
// Whether to force shutdown upon device
- // restart. The default value is false.
- ForceStopInstance bool `mapstructure:"force_stop_instance" required:"false"`
+ // restart. The default value is false.
+ ForceStopInstance bool `mapstructure:"force_stop_instance" required:"false"`
// If this option is set to true, Packer
- // will not stop the instance for you, and you need to make sure the instance
- // will be stopped in the final provisioner command. Otherwise, Packer will
- // timeout while waiting the instance to be stopped. This option is provided
- // for some specific scenarios that you want to stop the instance by yourself.
- // E.g., Sysprep a windows which may shutdown the instance within its command.
- // The default value is false.
- DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
+ // will not stop the instance for you, and you need to make sure the instance
+ // will be stopped in the final provisioner command. Otherwise, Packer will
+ // timeout while waiting the instance to be stopped. This option is provided
+ // for some specific scenarios that you want to stop the instance by yourself.
+ // E.g., Sysprep a windows which may shutdown the instance within its command.
+ // The default value is false.
+ DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
// ID of the security group to which a newly
- // created instance belongs. Mutual access is allowed between instances in one
- // security group. If not specified, the newly created instance will be added
- // to the default security group. If the default group doesn’t exist, or the
- // number of instances in it has reached the maximum limit, a new security
- // group will be created automatically.
- SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ // created instance belongs. Mutual access is allowed between instances in one
+ // security group. If not specified, the newly created instance will be added
+ // to the default security group. If the default group doesn’t exist, or the
+ // number of instances in it has reached the maximum limit, a new security
+ // group will be created automatically.
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
// The security group name. The default value
- // is blank. [2, 128] English or Chinese characters, must begin with an
- // uppercase/lowercase letter or Chinese character. Can contain numbers, .,
- // _ or -. It cannot begin with http:// or https://.
- SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
+ // is blank. [2, 128] English or Chinese characters, must begin with an
+ // uppercase/lowercase letter or Chinese character. Can contain numbers, .,
+ // _ or -. It cannot begin with http:// or https://.
+ SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
// User data to apply when launching the instance. Note
- // that you need to be careful about escaping characters due to the templates
- // being JSON. It is often more convenient to use user_data_file, instead.
- // Packer will not automatically wait for a user script to finish before
- // shutting down the instance this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the instance.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// VPC ID allocated by the system.
- VpcId string `mapstructure:"vpc_id" required:"false"`
+ VpcId string `mapstructure:"vpc_id" required:"false"`
// The VPC name. The default value is blank. [2, 128]
- // English or Chinese characters, must begin with an uppercase/lowercase
- // letter or Chinese character. Can contain numbers, _ and -. The disk
- // description will appear on the console. Cannot begin with http:// or
- // https://.
- VpcName string `mapstructure:"vpc_name" required:"false"`
+ // English or Chinese characters, must begin with an uppercase/lowercase
+ // letter or Chinese character. Can contain numbers, _ and -. The disk
+ // description will appear on the console. Cannot begin with http:// or
+ // https://.
+ VpcName string `mapstructure:"vpc_name" required:"false"`
// Value options: 192.168.0.0/16 and
- // 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
- CidrBlock string `mapstructure:"vpc_cidr_block" required:"false"`
+ // 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
+ CidrBlock string `mapstructure:"vpc_cidr_block" required:"false"`
// The ID of the VSwitch to be used.
- VSwitchId string `mapstructure:"vswitch_id" required:"false"`
+ VSwitchId string `mapstructure:"vswitch_id" required:"false"`
// The ID of the VSwitch to be used.
- VSwitchName string `mapstructure:"vswitch_id" required:"false"`
+ VSwitchName string `mapstructure:"vswitch_id" required:"false"`
// Display name of the instance, which is a string
- // of 2 to 128 Chinese or English characters. It must begin with an
- // uppercase/lowercase letter or a Chinese character and can contain numerals,
- // ., _, or -. The instance name is displayed on the Alibaba Cloud
- // console. If this parameter is not specified, the default value is
- // InstanceId of the instance. It cannot begin with http:// or https://.
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ // of 2 to 128 Chinese or English characters. It must begin with an
+ // uppercase/lowercase letter or a Chinese character and can contain numerals,
+ // ., _, or -. The instance name is displayed on the Alibaba Cloud
+ // console. If this parameter is not specified, the default value is
+ // InstanceId of the instance. It cannot begin with http:// or https://.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// Internet charge type, which can be
- // PayByTraffic or PayByBandwidth. Optional values:
- InternetChargeType string `mapstructure:"internet_charge_type" required:"false"`
+ // PayByTraffic or PayByBandwidth. Optional values:
+ InternetChargeType string `mapstructure:"internet_charge_type" required:"false"`
// Maximum outgoing bandwidth to the
- // public network, measured in Mbps (Mega bits per second).
- InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out" required:"false"`
+ // public network, measured in Mbps (Mega bits per second).
+ InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out" required:"false"`
// Timeout of creating snapshot(s).
- // The default timeout is 3600 seconds if this option is not set or is set
- // to 0. For those disks containing lots of data, it may require a higher
- // timeout value.
- WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout" required:"false"`
+ // The default timeout is 3600 seconds if this option is not set or is set
+ // to 0. For those disks containing lots of data, it may require a higher
+ // timeout value.
+ WaitSnapshotReadyTimeout int `mapstructure:"wait_snapshot_ready_timeout" required:"false"`
// Communicator settings
- Comm communicator.Config `mapstructure:",squash"`
+ Comm communicator.Config `mapstructure:",squash"`
// If this value is true, packer will connect to
- // the ECS created through private ip instead of allocating a public ip or an
- // EIP. The default value is false.
- SSHPrivateIp bool `mapstructure:"ssh_private_ip" required:"false"`
+ // the ECS created through private ip instead of allocating a public ip or an
+ // EIP. The default value is false.
+ SSHPrivateIp bool `mapstructure:"ssh_private_ip" required:"false"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index 63da11d33..11c1f0e14 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -120,8 +120,8 @@ type RunConfig struct {
// Unlimited - even for instances that would usually qualify for the
// [AWS Free Tier](https://aws.amazon.com/free/).
EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
- // The name of an IAM instance
- // profile
+ // The name of an [IAM instance
+ // profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
// to launch the EC2 instance with.
IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
// Automatically terminate instances on
@@ -131,8 +131,7 @@ type RunConfig struct {
// The EC2 instance type to use while building the
// AMI, such as t2.small.
InstanceType string `mapstructure:"instance_type" required:"true"`
- // Filters used to populate the
- // `security_group_ids` field. Example:
+ // Filters used to populate the `security_group_ids` field. Example:
//
// ``` json
// {
@@ -143,6 +142,15 @@ type RunConfig struct {
// }
// }
// ```
+ //
+ // This selects the SG's with tag `Class` with the value `packer`.
+ //
+ // - `filters` (map of strings) - filters used to select a
+ // `security_group_ids`. Any filter described in the docs for
+ // [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+ // is valid.
+
+ // `security_group_ids` take precedence over this.
SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
// Tags to apply to the instance
// that is launched to create the AMI. These tags are not applied to the
diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go
index afb4f3905..92ca1a76f 100644
--- a/builder/amazon/ebssurrogate/builder.go
+++ b/builder/amazon/ebssurrogate/builder.go
@@ -28,13 +28,13 @@ type Config struct {
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
// A block device mapping
- // describing the root device of the AMI. This looks like the mappings in
- // ami_block_device_mapping, except with an additional field:
+ // describing the root device of the AMI. This looks like the mappings in
+ // ami_block_device_mapping, except with an additional field:
RootDevice RootBlockDevice `mapstructure:"ami_root_device" required:"true"`
VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
// what architecture to use when registering the
- // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
- Architecture string `mapstructure:"ami_architecture" required:"false"`
+ // final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+ Architecture string `mapstructure:"ami_architecture" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/amazon/ebssurrogate/root_block_device.go b/builder/amazon/ebssurrogate/root_block_device.go
index e615fd504..146803a09 100644
--- a/builder/amazon/ebssurrogate/root_block_device.go
+++ b/builder/amazon/ebssurrogate/root_block_device.go
@@ -9,30 +9,30 @@ import (
)
type RootBlockDevice struct {
- SourceDeviceName string `mapstructure:"source_device_name"`
+ SourceDeviceName string `mapstructure:"source_device_name"`
// The device name exposed to the instance (for
- // example, /dev/sdh or xvdh). Required for every device in the block
- // device mapping.
- DeviceName string `mapstructure:"device_name" required:"false"`
+ // example, /dev/sdh or xvdh). Required for every device in the block
+ // device mapping.
+ DeviceName string `mapstructure:"device_name" required:"false"`
// Indicates whether the EBS volume is
- // deleted on instance termination. Default false. NOTE: If this
- // value is not explicitly set to true and volumes are not cleaned up by
- // an alternative method, additional volumes will accumulate after every
- // build.
- DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
+ // deleted on instance termination. Default false. NOTE: If this
+ // value is not explicitly set to true and volumes are not cleaned up by
+ // an alternative method, additional volumes will accumulate after every
+ // build.
+ DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
// The number of I/O operations per second (IOPS) that
- // the volume supports. See the documentation on
- // IOPs
- // for more information
- IOPS int64 `mapstructure:"iops" required:"false"`
+ // the volume supports. See the documentation on
+ // IOPs
+ // for more information
+ IOPS int64 `mapstructure:"iops" required:"false"`
// The volume type. gp2 for General Purpose
- // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
- // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
- // Magnetic volumes.
- VolumeType string `mapstructure:"volume_type" required:"false"`
+ // (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ // Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ // Magnetic volumes.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
// The size of the volume, in GiB. Required if
- // not specifying a snapshot_id.
- VolumeSize int64 `mapstructure:"volume_size" required:"false"`
+ // not specifying a snapshot_id.
+ VolumeSize int64 `mapstructure:"volume_size" required:"false"`
}
func (c *RootBlockDevice) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/amazon/ebsvolume/block_device.go b/builder/amazon/ebsvolume/block_device.go
index 0e6e5da19..f628d20f5 100644
--- a/builder/amazon/ebsvolume/block_device.go
+++ b/builder/amazon/ebsvolume/block_device.go
@@ -10,9 +10,9 @@ import (
type BlockDevice struct {
awscommon.BlockDevice `mapstructure:"-,squash"`
// Tags applied to the AMI. This is a
- // template engine, see Build template
- // data for more information.
- Tags awscommon.TagMap `mapstructure:"tags" required:"false"`
+ // template engine, see Build template
+ // data for more information.
+ Tags awscommon.TagMap `mapstructure:"tags" required:"false"`
}
func commonBlockDevices(mappings []BlockDevice, ctx *interpolate.Context) (awscommon.BlockDevices, error) {
diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index 201dfe73a..23208ac3d 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -33,43 +33,43 @@ type Config struct {
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.RunConfig `mapstructure:",squash"`
// Your AWS account ID. This is required for bundling
- // the AMI. This is not the same as the access key. You can find your
- // account ID in the security credentials page of your AWS account.
- AccountId string `mapstructure:"account_id" required:"true"`
+ // the AMI. This is not the same as the access key. You can find your
+ // account ID in the security credentials page of your AWS account.
+ AccountId string `mapstructure:"account_id" required:"true"`
// The directory on the running instance where
- // the bundled AMI will be saved prior to uploading. By default this is
- // /tmp. This directory must exist and be writable.
- BundleDestination string `mapstructure:"bundle_destination" required:"false"`
+ // the bundled AMI will be saved prior to uploading. By default this is
+ // /tmp. This directory must exist and be writable.
+ BundleDestination string `mapstructure:"bundle_destination" required:"false"`
// The prefix for files created from bundling the
- // root volume. By default this is image-{{timestamp}}. The timestamp
- // variable should be used to make sure this is unique, otherwise it can
- // collide with other created AMIs by Packer in your account.
- BundlePrefix string `mapstructure:"bundle_prefix" required:"false"`
+ // root volume. By default this is image-{{timestamp}}. The timestamp
+ // variable should be used to make sure this is unique, otherwise it can
+ // collide with other created AMIs by Packer in your account.
+ BundlePrefix string `mapstructure:"bundle_prefix" required:"false"`
// The command to use to upload the bundled
- // volume. See the "custom bundle commands" section below for more
- // information.
+ // volume. See the "custom bundle commands" section below for more
+ // information.
BundleUploadCommand string `mapstructure:"bundle_upload_command" required:"false"`
// The command to use to bundle the volume.
- // See the "custom bundle commands" section below for more information.
- BundleVolCommand string `mapstructure:"bundle_vol_command" required:"false"`
+ // See the "custom bundle commands" section below for more information.
+ BundleVolCommand string `mapstructure:"bundle_vol_command" required:"false"`
// The name of the S3 bucket to upload the AMI. This
- // bucket will be created if it doesn't exist.
- S3Bucket string `mapstructure:"s3_bucket" required:"true"`
+ // bucket will be created if it doesn't exist.
+ S3Bucket string `mapstructure:"s3_bucket" required:"true"`
// The local path to a valid X509 certificate for
- // your AWS account. This is used for bundling the AMI. This X509 certificate
- // must be registered with your account from the security credentials page in
- // the AWS console.
- X509CertPath string `mapstructure:"x509_cert_path" required:"true"`
+ // your AWS account. This is used for bundling the AMI. This X509 certificate
+ // must be registered with your account from the security credentials page in
+ // the AWS console.
+ X509CertPath string `mapstructure:"x509_cert_path" required:"true"`
// The local path to the private key for the X509
- // certificate specified by x509_cert_path. This is used for bundling the
- // AMI.
- X509KeyPath string `mapstructure:"x509_key_path" required:"true"`
+ // certificate specified by x509_cert_path. This is used for bundling the
+ // AMI.
+ X509KeyPath string `mapstructure:"x509_key_path" required:"true"`
// The path on the remote machine where the X509
- // certificate will be uploaded. This path must already exist and be writable.
- // X509 certificates are uploaded after provisioning is run, so it is
- // perfectly okay to create this directory as part of the provisioning
- // process. Defaults to /tmp.
- X509UploadPath string `mapstructure:"x509_upload_path" required:"false"`
+ // certificate will be uploaded. This path must already exist and be writable.
+ // X509 certificates are uploaded after provisioning is run, so it is
+ // perfectly okay to create this directory as part of the provisioning
+ // process. Defaults to /tmp.
+ X509UploadPath string `mapstructure:"x509_upload_path" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/azure/arm/clientconfig.go b/builder/azure/arm/clientconfig.go
index ab8a22ef9..f3a561d73 100644
--- a/builder/azure/arm/clientconfig.go
+++ b/builder/azure/arm/clientconfig.go
@@ -17,8 +17,8 @@ import (
// ClientConfig allows for various ways to authenticate Azure clients
type ClientConfig struct {
// One of Public, China, Germany, or
- // USGovernment. Defaults to Public. Long forms such as
- // USGovernmentCloud and AzureUSGovernmentCloud are also supported.
+ // USGovernment. Defaults to Public. Long forms such as
+ // USGovernmentCloud and AzureUSGovernmentCloud are also supported.
CloudEnvironmentName string `mapstructure:"cloud_environment_name" required:"false"`
cloudEnvironment *azure.Environment
@@ -31,11 +31,11 @@ type ClientConfig struct {
// Certificate path for client auth
ClientCertPath string `mapstructure:"client_cert_path"`
// JWT bearer token for client auth (RFC 7523, Sec. 2.2)
- ClientJWT string `mapstructure:"client_jwt"`
- ObjectID string `mapstructure:"object_id"`
+ ClientJWT string `mapstructure:"client_jwt"`
+ ObjectID string `mapstructure:"object_id"`
// The account identifier with which your client_id and
- // subscription_id are associated. If not specified, tenant_id will be
- // looked up using subscription_id.
+ // subscription_id are associated. If not specified, tenant_id will be
+ // looked up using subscription_id.
TenantID string `mapstructure:"tenant_id" required:"false"`
SubscriptionID string `mapstructure:"subscription_id"`
}
diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go
index b0b740474..e28d046d9 100644
--- a/builder/azure/arm/config.go
+++ b/builder/azure/arm/config.go
@@ -73,11 +73,11 @@ type SharedImageGallery struct {
GalleryName string `mapstructure:"gallery_name"`
ImageName string `mapstructure:"image_name"`
// Specify a specific version of an OS to boot from.
- // Defaults to latest. There may be a difference in versions available
- // across regions due to image synchronization latency. To ensure a consistent
- // version across regions set this value to one that is available in all
- // regions where you are deploying.
- ImageVersion string `mapstructure:"image_version" required:"false"`
+ // Defaults to latest. There may be a difference in versions available
+ // across regions due to image synchronization latency. To ensure a consistent
+ // version across regions set this value to one that is available in all
+ // regions where you are deploying.
+ ImageVersion string `mapstructure:"image_version" required:"false"`
}
type Config struct {
@@ -90,147 +90,147 @@ type Config struct {
CaptureNamePrefix string `mapstructure:"capture_name_prefix"`
CaptureContainerName string `mapstructure:"capture_container_name"`
// Use a Shared Gallery
- // image
- // as the source for this build. VHD targets are incompatible with this build
- // type - the target must be a Managed Image.
+ // image
+ // as the source for this build. VHD targets are incompatible with this build
+ // type - the target must be a Managed Image.
SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery" required:"false"`
// PublisherName for your base image. See
- // documentation
- // for details.
+ // documentation
+ // for details.
ImagePublisher string `mapstructure:"image_publisher" required:"true"`
// Offer for your base image. See
- // documentation
- // for details.
- ImageOffer string `mapstructure:"image_offer" required:"true"`
+ // documentation
+ // for details.
+ ImageOffer string `mapstructure:"image_offer" required:"true"`
// SKU for your base image. See
- // documentation
- // for details.
- ImageSku string `mapstructure:"image_sku" required:"true"`
+ // documentation
+ // for details.
+ ImageSku string `mapstructure:"image_sku" required:"true"`
// Specify a specific version of an OS to boot from.
- // Defaults to latest. There may be a difference in versions available
- // across regions due to image synchronization latency. To ensure a consistent
- // version across regions set this value to one that is available in all
- // regions where you are deploying.
- ImageVersion string `mapstructure:"image_version" required:"false"`
+ // Defaults to latest. There may be a difference in versions available
+ // across regions due to image synchronization latency. To ensure a consistent
+ // version across regions set this value to one that is available in all
+ // regions where you are deploying.
+ ImageVersion string `mapstructure:"image_version" required:"false"`
// Specify a custom VHD to use. If this value is set, do
- // not set image_publisher, image_offer, image_sku, or image_version.
- ImageUrl string `mapstructure:"image_url" required:"false"`
+ // not set image_publisher, image_offer, image_sku, or image_version.
+ ImageUrl string `mapstructure:"image_url" required:"false"`
// Specify the source
- // managed image's resource group used to use. If this value is set, do not
- // set image_publisher, image_offer, image_sku, or image_version. If this
- // value is set, the value custom_managed_image_name must also be set. See
- // documentation
- // to learn more about managed images.
+ // managed image's resource group used to use. If this value is set, do not
+ // set image_publisher, image_offer, image_sku, or image_version. If this
+ // value is set, the value custom_managed_image_name must also be set. See
+ // documentation
+ // to learn more about managed images.
CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"false"`
// Specify the source managed image's
- // name to use. If this value is set, do not set image_publisher,
- // image_offer, image_sku, or image_version. If this value is set, the
- // value custom_managed_image_resource_group_name must also be set. See
- // documentation
- // to learn more about managed images.
- CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"false"`
- customManagedImageID string
+ // name to use. If this value is set, do not set image_publisher,
+ // image_offer, image_sku, or image_version. If this value is set, the
+ // value custom_managed_image_resource_group_name must also be set. See
+ // documentation
+ // to learn more about managed images.
+ CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"false"`
+ customManagedImageID string
Location string `mapstructure:"location"`
// Size of the VM used for building. This can be changed
- // when you deploy a VM from your VHD. See
- // pricing
- // information. Defaults to Standard_A1.
- VMSize string `mapstructure:"vm_size" required:"false"`
+ // when you deploy a VM from your VHD. See
+ // pricing
+ // information. Defaults to Standard_A1.
+ VMSize string `mapstructure:"vm_size" required:"false"`
- ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
- ManagedImageName string `mapstructure:"managed_image_name"`
+ ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
+ ManagedImageName string `mapstructure:"managed_image_name"`
// Specify the storage account
- // type for a managed image. Valid values are Standard_LRS and Premium_LRS.
- // The default is Standard_LRS.
- ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type" required:"false"`
- managedImageStorageAccountType compute.StorageAccountTypes
+ // type for a managed image. Valid values are Standard_LRS and Premium_LRS.
+ // The default is Standard_LRS.
+ ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type" required:"false"`
+ managedImageStorageAccountType compute.StorageAccountTypes
// If
- // managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
- // is created with the same name as this value before the VM is captured.
- ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name" required:"false"`
+ // managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
+ // is created with the same name as this value before the VM is captured.
+ ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name" required:"false"`
// If
- // managed_image_data_disk_snapshot_prefix is set, snapshot of the data
- // disk(s) is created with the same prefix as this value before the VM is
- // captured.
+ // managed_image_data_disk_snapshot_prefix is set, snapshot of the data
+ // disk(s) is created with the same prefix as this value before the VM is
+ // captured.
ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix" required:"false"`
manageImageLocation string
// Store the image in zone-resilient storage. You need to create it
- // in a region that supports availability zones.
- ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
+ // in a region that supports availability zones.
+ ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
// the user can define up to 15
- // tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
- // 256 characters. Tags are applied to every resource deployed by a Packer
- // build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
- AzureTags map[string]*string `mapstructure:"azure_tags" required:"false"`
- ResourceGroupName string `mapstructure:"resource_group_name"`
- StorageAccount string `mapstructure:"storage_account"`
+ // tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
+ // 256 characters. Tags are applied to every resource deployed by a Packer
+ // build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
+ AzureTags map[string]*string `mapstructure:"azure_tags" required:"false"`
+ ResourceGroupName string `mapstructure:"resource_group_name"`
+ StorageAccount string `mapstructure:"storage_account"`
// temporary name assigned to the VM. If this
- // value is not set, a random value will be assigned. Knowing the resource
- // group and VM name allows one to execute commands to update the VM during a
- // Packer build, e.g. attach a resource disk to the VM.
- TempComputeName string `mapstructure:"temp_compute_name" required:"false"`
- TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
- BuildResourceGroupName string `mapstructure:"build_resource_group_name"`
- storageAccountBlobEndpoint string
+ // value is not set, a random value will be assigned. Knowing the resource
+ // group and VM name allows one to execute commands to update the VM during a
+ // Packer build, e.g. attach a resource disk to the VM.
+ TempComputeName string `mapstructure:"temp_compute_name" required:"false"`
+ TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
+ BuildResourceGroupName string `mapstructure:"build_resource_group_name"`
+ storageAccountBlobEndpoint string
// This value allows you to
- // set a virtual_network_name and obtain a public IP. If this value is not
- // set and virtual_network_name is defined Packer is only allowed to be
- // executed from a host on the same subnet / virtual network.
- PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip" required:"false"`
+ // set a virtual_network_name and obtain a public IP. If this value is not
+ // set and virtual_network_name is defined Packer is only allowed to be
+ // executed from a host on the same subnet / virtual network.
+ PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip" required:"false"`
// Use a pre-existing virtual network for the
- // VM. This option enables private communication with the VM, no public IP
- // address is used or provisioned (unless you set
- // private_virtual_network_with_public_ip).
- VirtualNetworkName string `mapstructure:"virtual_network_name" required:"false"`
+ // VM. This option enables private communication with the VM, no public IP
+ // address is used or provisioned (unless you set
+ // private_virtual_network_with_public_ip).
+ VirtualNetworkName string `mapstructure:"virtual_network_name" required:"false"`
// If virtual_network_name is set,
- // this value may also be set. If virtual_network_name is set, and this
- // value is not set the builder attempts to determine the subnet to use with
- // the virtual network. If the subnet cannot be found, or it cannot be
- // disambiguated, this value should be set.
- VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name" required:"false"`
+ // this value may also be set. If virtual_network_name is set, and this
+ // value is not set the builder attempts to determine the subnet to use with
+ // the virtual network. If the subnet cannot be found, or it cannot be
+ // disambiguated, this value should be set.
+ VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name" required:"false"`
// If virtual_network_name is
- // set, this value may also be set. If virtual_network_name is set, and
- // this value is not set the builder attempts to determine the resource group
- // containing the virtual network. If the resource group cannot be found, or
- // it cannot be disambiguated, this value should be set.
- VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
+ // set, this value may also be set. If virtual_network_name is set, and
+ // this value is not set the builder attempts to determine the resource group
+ // containing the virtual network. If the resource group cannot be found, or
+ // it cannot be disambiguated, this value should be set.
+ VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
// Specify a file containing custom data to inject
- // into the cloud-init process. The contents of the file are read and injected
- // into the ARM template. The custom data will be passed to cloud-init for
- // processing at the time of provisioning. See
- // documentation
- // to learn more about custom data, and how it can be used to influence the
- // provisioning process.
- CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
- customData string
+ // into the cloud-init process. The contents of the file are read and injected
+ // into the ARM template. The custom data will be passed to cloud-init for
+ // processing at the time of provisioning. See
+ // documentation
+ // to learn more about custom data, and how it can be used to influence the
+ // provisioning process.
+ CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
+ customData string
// Used for creating images from Marketplace images.
- // Please refer to Deploy an image with Marketplace
- // terms for more details. Not
- // all Marketplace images support programmatic deployment, and support is
- // controlled by the image publisher.
- PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
+ // Please refer to Deploy an image with Marketplace
+ // terms for more details. Not
+ // all Marketplace images support programmatic deployment, and support is
+ // controlled by the image publisher.
+ PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
// If either Linux or Windows is specified Packer will
- // automatically configure authentication credentials for the provisioned
- // machine. For Linux this configures an SSH authorized key. For Windows
- // this configures a WinRM certificate.
- OSType string `mapstructure:"os_type" required:"false"`
+ // automatically configure authentication credentials for the provisioned
+ // machine. For Linux this configures an SSH authorized key. For Windows
+ // this configures a WinRM certificate.
+ OSType string `mapstructure:"os_type" required:"false"`
// Specify the size of the OS disk in GB
- // (gigabytes). Values of zero or less than zero are ignored.
- OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
+ // (gigabytes). Values of zero or less than zero are ignored.
+ OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
// The size(s) of any additional
- // hard disks for the VM in gigabytes. If this is not specified then the VM
- // will only contain an OS disk. The number of additional disks and maximum
- // size of a disk depends on the configuration of your VM. See
- // Windows
- // or
- // Linux
- // for more information.
+ // hard disks for the VM in gigabytes. If this is not specified then the VM
+ // will only contain an OS disk. The number of additional disks and maximum
+ // size of a disk depends on the configuration of your VM. See
+ // Windows
+ // or
+ // Linux
+ // for more information.
AdditionalDiskSize []int32 `mapstructure:"disk_additional_size" required:"false"`
// Specify the disk caching type. Valid values
- // are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
- DiskCachingType string `mapstructure:"disk_caching_type" required:"false"`
- diskCachingType compute.CachingTypes
+ // are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
+ DiskCachingType string `mapstructure:"disk_caching_type" required:"false"`
+ diskCachingType compute.CachingTypes
// Runtime Values
UserName string
@@ -257,9 +257,9 @@ type Config struct {
Comm communicator.Config `mapstructure:",squash"`
ctx interpolate.Context
// If you want packer to delete the
- // temporary resource group asynchronously set this value. It's a boolean
- // value and defaults to false. Important Setting this true means that
- // your builds are faster, however any failed deletes are not reported.
+ // temporary resource group asynchronously set this value. It's a boolean
+ // value and defaults to false. Important: setting this to true means that
+ // your builds are faster, however any failed deletes are not reported.
AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete" required:"false"`
}
diff --git a/builder/cloudstack/config.go b/builder/cloudstack/config.go
index 9e4e5c105..6c873cc60 100644
--- a/builder/cloudstack/config.go
+++ b/builder/cloudstack/config.go
@@ -22,130 +22,130 @@ type Config struct {
common.HTTPConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
// The CloudStack API endpoint we will connect to. It can
- // also be specified via environment variable CLOUDSTACK_API_URL, if set.
- APIURL string `mapstructure:"api_url" required:"true"`
+ // also be specified via environment variable CLOUDSTACK_API_URL, if set.
+ APIURL string `mapstructure:"api_url" required:"true"`
// The API key used to sign all API requests. It can also
- // be specified via environment variable CLOUDSTACK_API_KEY, if set.
- APIKey string `mapstructure:"api_key" required:"true"`
+ // be specified via environment variable CLOUDSTACK_API_KEY, if set.
+ APIKey string `mapstructure:"api_key" required:"true"`
// The secret key used to sign all API requests. It
- // can also be specified via environment variable CLOUDSTACK_SECRET_KEY, if
- // set.
- SecretKey string `mapstructure:"secret_key" required:"true"`
+ // can also be specified via environment variable CLOUDSTACK_SECRET_KEY, if
+ // set.
+ SecretKey string `mapstructure:"secret_key" required:"true"`
// The time duration to wait for async calls to
- // finish. Defaults to 30m.
+ // finish. Defaults to 30m.
AsyncTimeout time.Duration `mapstructure:"async_timeout" required:"false"`
// Some cloud providers only allow HTTP GET calls
- // to their CloudStack API. If using such a provider, you need to set this to
- // true in order for the provider to only make GET calls and no POST calls.
- HTTPGetOnly bool `mapstructure:"http_get_only" required:"false"`
+ // to their CloudStack API. If using such a provider, you need to set this to
+ // true in order for the provider to only make GET calls and no POST calls.
+ HTTPGetOnly bool `mapstructure:"http_get_only" required:"false"`
// Set to true to skip SSL verification.
- // Defaults to false.
- SSLNoVerify bool `mapstructure:"ssl_no_verify" required:"false"`
+ // Defaults to false.
+ SSLNoVerify bool `mapstructure:"ssl_no_verify" required:"false"`
// List of CIDR's that will have access to the new
- // instance. This is needed in order for any provisioners to be able to
- // connect to the instance. Defaults to [ "0.0.0.0/0" ]. Only required when
- // use_local_ip_address is false.
- CIDRList []string `mapstructure:"cidr_list" required:"false"`
+ // instance. This is needed in order for any provisioners to be able to
+ // connect to the instance. Defaults to [ "0.0.0.0/0" ]. Only required when
+ // use_local_ip_address is false.
+ CIDRList []string `mapstructure:"cidr_list" required:"false"`
// If true a temporary security group
- // will be created which allows traffic towards the instance from the
- // cidr_list. This option will be ignored if security_groups is also
- // defined. Requires expunge set to true. Defaults to false.
- CreateSecurityGroup bool `mapstructure:"create_security_group" required:"false"`
+ // will be created which allows traffic towards the instance from the
+ // cidr_list. This option will be ignored if security_groups is also
+ // defined. Requires expunge set to true. Defaults to false.
+ CreateSecurityGroup bool `mapstructure:"create_security_group" required:"false"`
// The name or ID of the disk offering used for the
- // instance. This option is only available (and also required) when using
- // source_iso.
- DiskOffering string `mapstructure:"disk_offering" required:"false"`
+ // instance. This option is only available (and also required) when using
+ // source_iso.
+ DiskOffering string `mapstructure:"disk_offering" required:"false"`
// The size (in GB) of the root disk of the new
- // instance. This option is only available when using source_template.
- DiskSize int64 `mapstructure:"disk_size" required:"false"`
+ // instance. This option is only available when using source_template.
+ DiskSize int64 `mapstructure:"disk_size" required:"false"`
// Set to true to expunge the instance when it is
- // destroyed. Defaults to false.
- Expunge bool `mapstructure:"expunge" required:"false"`
+ // destroyed. Defaults to false.
+ Expunge bool `mapstructure:"expunge" required:"false"`
// The target hypervisor (e.g. XenServer, KVM) for
- // the new template. This option is required when using source_iso.
- Hypervisor string `mapstructure:"hypervisor" required:"false"`
+ // the new template. This option is required when using source_iso.
+ Hypervisor string `mapstructure:"hypervisor" required:"false"`
// The name of the instance. Defaults to
- // "packer-UUID" where UUID is dynamically generated.
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ // "packer-UUID" where UUID is dynamically generated.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// The name or ID of the network to connect the instance
- // to.
- Network string `mapstructure:"network" required:"true"`
+ // to.
+ Network string `mapstructure:"network" required:"true"`
// The name or ID of the project to deploy the instance
- // to.
- Project string `mapstructure:"project" required:"false"`
+ // to.
+ Project string `mapstructure:"project" required:"false"`
// The public IP address or it's ID used for
- // connecting any provisioners to. If not provided, a temporary public IP
- // address will be associated and released during the Packer run.
- PublicIPAddress string `mapstructure:"public_ip_address" required:"false"`
+ // connecting any provisioners to. If not provided, a temporary public IP
+ // address will be associated and released during the Packer run.
+ PublicIPAddress string `mapstructure:"public_ip_address" required:"false"`
// The fixed port you want to configure in the port
- // forwarding rule. Set this attribute if you do not want to use the a random
- // public port.
- PublicPort int `mapstructure:"public_port" required:"false"`
+ // forwarding rule. Set this attribute if you do not want to use a random
+ // public port.
+ PublicPort int `mapstructure:"public_port" required:"false"`
// A list of security group IDs or
- // names to associate the instance with.
- SecurityGroups []string `mapstructure:"security_groups" required:"false"`
+ // names to associate the instance with.
+ SecurityGroups []string `mapstructure:"security_groups" required:"false"`
// The name or ID of the service offering used
- // for the instance.
- ServiceOffering string `mapstructure:"service_offering" required:"true"`
+ // for the instance.
+ ServiceOffering string `mapstructure:"service_offering" required:"true"`
// Set to true to prevent network
- // ACLs or firewall rules creation. Defaults to false.
- PreventFirewallChanges bool `mapstructure:"prevent_firewall_changes" required:"false"`
+ // ACLs or firewall rules creation. Defaults to false.
+ PreventFirewallChanges bool `mapstructure:"prevent_firewall_changes" required:"false"`
// The name or ID of an ISO that will be mounted
- // before booting the instance. This option is mutually exclusive with
- // source_template. When using source_iso, both disk_offering and
- // hypervisor are required.
- SourceISO string `mapstructure:"source_iso" required:"true"`
+ // before booting the instance. This option is mutually exclusive with
+ // source_template. When using source_iso, both disk_offering and
+ // hypervisor are required.
+ SourceISO string `mapstructure:"source_iso" required:"true"`
// The name or ID of the template used as base
- // template for the instance. This option is mutually exclusive with
- // source_iso.
- SourceTemplate string `mapstructure:"source_template" required:"true"`
+ // template for the instance. This option is mutually exclusive with
+ // source_iso.
+ SourceTemplate string `mapstructure:"source_template" required:"true"`
// The name of the temporary SSH key pair
- // to generate. By default, Packer generates a name that looks like
- // packer_, where is a 36 character unique identifier.
- TemporaryKeypairName string `mapstructure:"temporary_keypair_name" required:"false"`
+ // to generate. By default, Packer generates a name that looks like
+ // packer_<UUID>, where <UUID> is a 36 character unique identifier.
+ TemporaryKeypairName string `mapstructure:"temporary_keypair_name" required:"false"`
// Set to true to indicate that the
- // provisioners should connect to the local IP address of the instance.
- UseLocalIPAddress bool `mapstructure:"use_local_ip_address" required:"false"`
+ // provisioners should connect to the local IP address of the instance.
+ UseLocalIPAddress bool `mapstructure:"use_local_ip_address" required:"false"`
// User data to launch with the instance. This is a
- // template engine see User Data bellow for
- // more details. Packer will not automatically wait for a user script to
- // finish before shutting down the instance this must be handled in a
- // provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // template engine; see User Data below for
+ // more details. Packer will not automatically wait for a user script to
+ // finish before shutting down the instance; this must be handled in a
+ // provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the instance. This file will be parsed as a template
- // engine see User Data bellow for more
- // details.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the instance. This file will be parsed as a template
+ // engine; see User Data below for more
+ // details.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// The name or ID of the zone where the instance will be
- // created.
- Zone string `mapstructure:"zone" required:"true"`
+ // created.
+ Zone string `mapstructure:"zone" required:"true"`
// The name of the new template. Defaults to
- // "packer-{{timestamp}}" where timestamp will be the current time.
- TemplateName string `mapstructure:"template_name" required:"false"`
+ // "packer-{{timestamp}}" where timestamp will be the current time.
+ TemplateName string `mapstructure:"template_name" required:"false"`
// The display text of the new template.
- // Defaults to the template_name.
- TemplateDisplayText string `mapstructure:"template_display_text" required:"false"`
+ // Defaults to the template_name.
+ TemplateDisplayText string `mapstructure:"template_display_text" required:"false"`
// The name or ID of the template OS for the new
- // template that will be created.
- TemplateOS string `mapstructure:"template_os" required:"true"`
+ // template that will be created.
+ TemplateOS string `mapstructure:"template_os" required:"true"`
// Set to true to indicate that the template
- // is featured. Defaults to false.
- TemplateFeatured bool `mapstructure:"template_featured" required:"false"`
+ // is featured. Defaults to false.
+ TemplateFeatured bool `mapstructure:"template_featured" required:"false"`
// Set to true to indicate that the template
- // is available for all accounts. Defaults to false.
- TemplatePublic bool `mapstructure:"template_public" required:"false"`
+ // is available for all accounts. Defaults to false.
+ TemplatePublic bool `mapstructure:"template_public" required:"false"`
// Set to true to indicate the
- // template should be password enabled. Defaults to false.
- TemplatePasswordEnabled bool `mapstructure:"template_password_enabled" required:"false"`
+ // template should be password enabled. Defaults to false.
+ TemplatePasswordEnabled bool `mapstructure:"template_password_enabled" required:"false"`
// Set to true to indicate the template
- // requires hardware-assisted virtualization. Defaults to false.
- TemplateRequiresHVM bool `mapstructure:"template_requires_hvm" required:"false"`
+ // requires hardware-assisted virtualization. Defaults to false.
+ TemplateRequiresHVM bool `mapstructure:"template_requires_hvm" required:"false"`
// Set to true to indicate that the template
- // contains tools to support dynamic scaling of VM cpu/memory. Defaults to
- // false.
- TemplateScalable bool `mapstructure:"template_scalable" required:"false"`
- TemplateTag string `mapstructure:"template_tag"`
+ // contains tools to support dynamic scaling of VM cpu/memory. Defaults to
+ // false.
+ TemplateScalable bool `mapstructure:"template_scalable" required:"false"`
+ TemplateTag string `mapstructure:"template_tag"`
Tags map[string]string `mapstructure:"tags"`
diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go
index 970a16080..a03d15522 100644
--- a/builder/digitalocean/config.go
+++ b/builder/digitalocean/config.go
@@ -22,60 +22,60 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
// The client TOKEN to use to access your account. It
- // can also be specified via environment variable DIGITALOCEAN_API_TOKEN, if
- // set.
+ // can also be specified via environment variable DIGITALOCEAN_API_TOKEN, if
+ // set.
APIToken string `mapstructure:"api_token" required:"true"`
// Non standard api endpoint URL. Set this if you are
- // using a DigitalOcean API compatible service. It can also be specified via
- // environment variable DIGITALOCEAN_API_URL.
- APIURL string `mapstructure:"api_url" required:"false"`
+ // using a DigitalOcean API compatible service. It can also be specified via
+ // environment variable DIGITALOCEAN_API_URL.
+ APIURL string `mapstructure:"api_url" required:"false"`
// The name (or slug) of the region to launch the droplet
- // in. Consequently, this is the region where the snapshot will be available.
- // See
- // https://developers.digitalocean.com/documentation/v2/#list-all-regions
- // for the accepted region names/slugs.
+ // in. Consequently, this is the region where the snapshot will be available.
+ // See
+ // https://developers.digitalocean.com/documentation/v2/#list-all-regions
+ // for the accepted region names/slugs.
Region string `mapstructure:"region" required:"true"`
// The name (or slug) of the droplet size to use. See
- // https://developers.digitalocean.com/documentation/v2/#list-all-sizes
- // for the accepted size names/slugs.
- Size string `mapstructure:"size" required:"true"`
+ // https://developers.digitalocean.com/documentation/v2/#list-all-sizes
+ // for the accepted size names/slugs.
+ Size string `mapstructure:"size" required:"true"`
// The name (or slug) of the base image to use. This is the
- // image that will be used to launch a new droplet and provision it. See
- // https://developers.digitalocean.com/documentation/v2/#list-all-images
- // for details on how to get a list of the accepted image names/slugs.
- Image string `mapstructure:"image" required:"true"`
+ // image that will be used to launch a new droplet and provision it. See
+ // https://developers.digitalocean.com/documentation/v2/#list-all-images
+ // for details on how to get a list of the accepted image names/slugs.
+ Image string `mapstructure:"image" required:"true"`
// Set to true to enable private networking
- // for the droplet being created. This defaults to false, or not enabled.
- PrivateNetworking bool `mapstructure:"private_networking" required:"false"`
+ // for the droplet being created. This defaults to false, or not enabled.
+ PrivateNetworking bool `mapstructure:"private_networking" required:"false"`
// Set to true to enable monitoring for the droplet
- // being created. This defaults to false, or not enabled.
- Monitoring bool `mapstructure:"monitoring" required:"false"`
+ // being created. This defaults to false, or not enabled.
+ Monitoring bool `mapstructure:"monitoring" required:"false"`
// Set to true to enable ipv6 for the droplet being
- // created. This defaults to false, or not enabled.
- IPv6 bool `mapstructure:"ipv6" required:"false"`
+ // created. This defaults to false, or not enabled.
+ IPv6 bool `mapstructure:"ipv6" required:"false"`
// The name of the resulting snapshot that will
- // appear in your account. Defaults to "packer-{{timestamp}}" (see
- // configuration templates for more info).
- SnapshotName string `mapstructure:"snapshot_name" required:"false"`
+ // appear in your account. Defaults to "packer-{{timestamp}}" (see
+ // configuration templates for more info).
+ SnapshotName string `mapstructure:"snapshot_name" required:"false"`
// The regions of the resulting
- // snapshot that will appear in your account.
- SnapshotRegions []string `mapstructure:"snapshot_regions" required:"false"`
+ // snapshot that will appear in your account.
+ SnapshotRegions []string `mapstructure:"snapshot_regions" required:"false"`
// The time to wait, as a duration string, for a
- // droplet to enter a desired state (such as "active") before timing out. The
- // default state timeout is "6m".
- StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
+ // droplet to enter a desired state (such as "active") before timing out. The
+ // default state timeout is "6m".
+ StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
// The name assigned to the droplet. DigitalOcean
- // sets the hostname of the machine to this value.
- DropletName string `mapstructure:"droplet_name" required:"false"`
+ // sets the hostname of the machine to this value.
+ DropletName string `mapstructure:"droplet_name" required:"false"`
// User data to launch with the Droplet. Packer will
- // not automatically wait for a user script to finish before shutting down the
- // instance this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // not automatically wait for a user script to finish before shutting down the
+ // instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the Droplet.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the Droplet.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// Tags to apply to the droplet when it is created
- Tags []string `mapstructure:"tags" required:"false"`
+ Tags []string `mapstructure:"tags" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/docker/config.go b/builder/docker/config.go
index dbb041dc0..20feb885b 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -26,69 +26,69 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
- Author string
- Changes []string
- Commit bool
+ Author string
+ Changes []string
+ Commit bool
// The directory inside container to mount temp
- // directory from host server for work file
- // provisioner. This defaults to
- // c:/packer-files on windows and /packer-files on other systems.
- ContainerDir string `mapstructure:"container_dir" required:"false"`
- Discard bool
+ // directory from host server for work file
+ // provisioner. This defaults to
+ // c:/packer-files on windows and /packer-files on other systems.
+ ContainerDir string `mapstructure:"container_dir" required:"false"`
+ Discard bool
// Username (UID) to run remote commands with. You can
- // also set the group name/ID if you want: (UID or UID:GID).
- // You may need this if you get permission errors trying to run the shell or
- // other provisioners.
- ExecUser string `mapstructure:"exec_user" required:"false"`
- ExportPath string `mapstructure:"export_path"`
- Image string
- Message string
+ // also set the group name/ID if you want: (UID or UID:GID).
+ // You may need this if you get permission errors trying to run the shell or
+ // other provisioners.
+ ExecUser string `mapstructure:"exec_user" required:"false"`
+ ExportPath string `mapstructure:"export_path"`
+ Image string
+ Message string
// If true, run the docker container with the
- // --privileged flag. This defaults to false if not set.
- Privileged bool `mapstructure:"privileged" required:"false"`
- Pty bool
- Pull bool
+ // --privileged flag. This defaults to false if not set.
+ Privileged bool `mapstructure:"privileged" required:"false"`
+ Pty bool
+ Pull bool
// An array of arguments to pass to
- // docker run in order to run the container. By default this is set to
- // ["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are
- // using a linux container, and
- // ["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"] if you
- // are running a windows container. {{.Image}} is a template variable that
- // corresponds to the image template option. Passing the entrypoint option
- // this way will make it the default entrypoint of the resulting image, so
- // running docker run -it --rm will start the docker image from the
- // /bin/sh shell interpreter; you could run a script or another shell by
- // running docker run -it --rm -c /bin/bash. If your docker image
- // embeds a binary intended to be run often, you should consider changing the
- // default entrypoint to point to it.
- RunCommand []string `mapstructure:"run_command" required:"false"`
- Volumes map[string]string
+ // docker run in order to run the container. By default this is set to
+ // ["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are
+ // using a linux container, and
+ // ["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"] if you
+ // are running a windows container. {{.Image}} is a template variable that
+ // corresponds to the image template option. Passing the entrypoint option
+ // this way will make it the default entrypoint of the resulting image, so
+ // running docker run -it --rm will start the docker image from the
+ // /bin/sh shell interpreter; you could run a script or another shell by
+ // running docker run -it --rm -c /bin/bash. If your docker image
+ // embeds a binary intended to be run often, you should consider changing the
+ // default entrypoint to point to it.
+ RunCommand []string `mapstructure:"run_command" required:"false"`
+ Volumes map[string]string
// If true, files uploaded to the container
- // will be owned by the user the container is running as. If false, the owner
- // will depend on the version of docker installed in the system. Defaults to
- // true.
- FixUploadOwner bool `mapstructure:"fix_upload_owner" required:"false"`
+ // will be owned by the user the container is running as. If false, the owner
+ // will depend on the version of docker installed in the system. Defaults to
+ // true.
+ FixUploadOwner bool `mapstructure:"fix_upload_owner" required:"false"`
// If "true", tells Packer that you are building a
- // Windows container running on a windows host. This is necessary for building
- // Windows containers, because our normal docker bindings do not work for them.
+ // Windows container running on a windows host. This is necessary for building
+ // Windows containers, because our normal docker bindings do not work for them.
WindowsContainer bool `mapstructure:"windows_container" required:"false"`
// This is used to login to dockerhub to pull a private base container. For
// pushing to dockerhub, see the docker post-processors
- Login bool
+ Login bool
// The password to use to authenticate to login.
- LoginPassword string `mapstructure:"login_password" required:"false"`
+ LoginPassword string `mapstructure:"login_password" required:"false"`
// The server address to login to.
- LoginServer string `mapstructure:"login_server" required:"false"`
+ LoginServer string `mapstructure:"login_server" required:"false"`
// The username to use to authenticate to login.
- LoginUsername string `mapstructure:"login_username" required:"false"`
+ LoginUsername string `mapstructure:"login_username" required:"false"`
// Defaults to false. If true, the builder will login
- // in order to pull the image from Amazon EC2 Container Registry
- // (ECR). The builder only logs in for the
- // duration of the pull. If true login_server is required and login,
- // login_username, and login_password will be ignored. For more
- // information see the section on ECR.
- EcrLogin bool `mapstructure:"ecr_login" required:"false"`
+ // in order to pull the image from Amazon EC2 Container Registry
+ // (ECR). The builder only logs in for the
+ // duration of the pull. If true login_server is required and login,
+ // login_username, and login_password will be ignored. For more
+ // information see the section on ECR.
+ EcrLogin bool `mapstructure:"ecr_login" required:"false"`
AwsAccessConfig `mapstructure:",squash"`
ctx interpolate.Context
diff --git a/builder/docker/ecr_login.go b/builder/docker/ecr_login.go
index 9438f15e0..f553d6021 100644
--- a/builder/docker/ecr_login.go
+++ b/builder/docker/ecr_login.go
@@ -16,23 +16,23 @@ import (
type AwsAccessConfig struct {
// The AWS access key used to communicate with
- // AWS. Learn how to set
- // this.
+ // AWS. Learn how to set
+ // this.
AccessKey string `mapstructure:"aws_access_key" required:"false"`
// The AWS secret key used to communicate with
- // AWS. Learn how to set
- // this.
+ // AWS. Learn how to set
+ // this.
SecretKey string `mapstructure:"aws_secret_key" required:"false"`
// The AWS access token to use. This is different from
- // the access key and secret key. If you're not sure what this is, then you
- // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
- // environmental variable.
- Token string `mapstructure:"aws_token" required:"false"`
+ // the access key and secret key. If you're not sure what this is, then you
+ // probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ // environment variable.
+ Token string `mapstructure:"aws_token" required:"false"`
// The AWS shared credentials profile used to
- // communicate with AWS. Learn how to set
- // this.
- Profile string `mapstructure:"aws_profile" required:"false"`
- cfg *common.AccessConfig
+ // communicate with AWS. Learn how to set
+ // this.
+ Profile string `mapstructure:"aws_profile" required:"false"`
+ cfg *common.AccessConfig
}
// Get a login token for Amazon AWS ECR. Returns username and password
diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go
index 8748e1559..123d36c70 100644
--- a/builder/googlecompute/config.go
+++ b/builder/googlecompute/config.go
@@ -27,137 +27,137 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
// The JSON file containing your account
- // credentials. Not required if you run Packer on a GCE instance with a
- // service account. Instructions for creating the file or using service
- // accounts are above.
+ // credentials. Not required if you run Packer on a GCE instance with a
+ // service account. Instructions for creating the file or using service
+ // accounts are above.
AccountFile string `mapstructure:"account_file" required:"false"`
// The project ID that will be used to launch
- // instances and store images.
- ProjectId string `mapstructure:"project_id" required:"true"`
+ // instances and store images.
+ ProjectId string `mapstructure:"project_id" required:"true"`
// Full or partial URL of the guest accelerator
- // type. GPU accelerators can only be used with
- // "on_host_maintenance": "TERMINATE" option set. Example:
- // "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
- AcceleratorType string `mapstructure:"accelerator_type" required:"false"`
+ // type. GPU accelerators can only be used with
+ // "on_host_maintenance": "TERMINATE" option set. Example:
+ // "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
+ AcceleratorType string `mapstructure:"accelerator_type" required:"false"`
// Number of guest accelerator cards to add to
- // the launched instance.
- AcceleratorCount int64 `mapstructure:"accelerator_count" required:"false"`
+ // the launched instance.
+ AcceleratorCount int64 `mapstructure:"accelerator_count" required:"false"`
// The name of a pre-allocated static external IP
- // address. Note, must be the name and not the actual IP address.
- Address string `mapstructure:"address" required:"false"`
+ // address. Note, must be the name and not the actual IP address.
+ Address string `mapstructure:"address" required:"false"`
// If true, the default service
- // account will not be used if service_account_email is not specified. Set
- // this value to true and omit service_account_email to provision a VM with
- // no service account.
- DisableDefaultServiceAccount bool `mapstructure:"disable_default_service_account" required:"false"`
+ // account will not be used if service_account_email is not specified. Set
+ // this value to true and omit service_account_email to provision a VM with
+ // no service account.
+ DisableDefaultServiceAccount bool `mapstructure:"disable_default_service_account" required:"false"`
// The name of the disk, if unset the instance name
- // will be used.
- DiskName string `mapstructure:"disk_name" required:"false"`
+ // will be used.
+ DiskName string `mapstructure:"disk_name" required:"false"`
// The size of the disk in GB. This defaults to 10,
- // which is 10GB.
- DiskSizeGb int64 `mapstructure:"disk_size" required:"false"`
+ // which is 10GB.
+ DiskSizeGb int64 `mapstructure:"disk_size" required:"false"`
// Type of disk used to back your instance, like
- // pd-ssd or pd-standard. Defaults to pd-standard.
- DiskType string `mapstructure:"disk_type" required:"false"`
+ // pd-ssd or pd-standard. Defaults to pd-standard.
+ DiskType string `mapstructure:"disk_type" required:"false"`
// The unique name of the resulting image. Defaults to
- // "packer-{{timestamp}}".
- ImageName string `mapstructure:"image_name" required:"false"`
+ // "packer-{{timestamp}}".
+ ImageName string `mapstructure:"image_name" required:"false"`
// The description of the resulting image.
- ImageDescription string `mapstructure:"image_description" required:"false"`
+ ImageDescription string `mapstructure:"image_description" required:"false"`
// Image encryption key to apply to the created image. Possible values:
- ImageEncryptionKey *compute.CustomerEncryptionKey `mapstructure:"image_encryption_key" required:"false"`
+ ImageEncryptionKey *compute.CustomerEncryptionKey `mapstructure:"image_encryption_key" required:"false"`
// The name of the image family to which the
- // resulting image belongs. You can create disks by specifying an image family
- // instead of a specific image name. The image family always returns its
- // latest image that is not deprecated.
- ImageFamily string `mapstructure:"image_family" required:"false"`
+ // resulting image belongs. You can create disks by specifying an image family
+ // instead of a specific image name. The image family always returns its
+ // latest image that is not deprecated.
+ ImageFamily string `mapstructure:"image_family" required:"false"`
// Key/value pair labels to
- // apply to the created image.
- ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
+ // apply to the created image.
+ ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
// Licenses to apply to the created
- // image.
- ImageLicenses []string `mapstructure:"image_licenses" required:"false"`
+ // image.
+ ImageLicenses []string `mapstructure:"image_licenses" required:"false"`
// A name to give the launched instance. Beware
- // that this must be unique. Defaults to "packer-{{uuid}}".
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ // that this must be unique. Defaults to "packer-{{uuid}}".
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// Key/value pair labels to apply to
- // the launched instance.
- Labels map[string]string `mapstructure:"labels" required:"false"`
+ // the launched instance.
+ Labels map[string]string `mapstructure:"labels" required:"false"`
// The machine type. Defaults to "n1-standard-1".
- MachineType string `mapstructure:"machine_type" required:"false"`
+ MachineType string `mapstructure:"machine_type" required:"false"`
// Metadata applied to the launched
- // instance.
- Metadata map[string]string `mapstructure:"metadata" required:"false"`
+ // instance.
+ Metadata map[string]string `mapstructure:"metadata" required:"false"`
// A Minimum CPU Platform for VM Instance.
- // Availability and default CPU platforms vary across zones, based on the
- // hardware available in each GCP zone.
- // Details
- MinCpuPlatform string `mapstructure:"min_cpu_platform" required:"false"`
+ // Availability and default CPU platforms vary across zones, based on the
+ // hardware available in each GCP zone.
+ // Details
+ MinCpuPlatform string `mapstructure:"min_cpu_platform" required:"false"`
// The Google Compute network id or URL to use for the
- // launched instance. Defaults to "default". If the value is not a URL, it
- // will be interpolated to
- // projects/((network_project_id))/global/networks/((network)). This value
- // is not required if a subnet is specified.
- Network string `mapstructure:"network" required:"false"`
+ // launched instance. Defaults to "default". If the value is not a URL, it
+ // will be interpolated to
+ // projects/((network_project_id))/global/networks/((network)). This value
+ // is not required if a subnet is specified.
+ Network string `mapstructure:"network" required:"false"`
// The project ID for the network and
- // subnetwork to use for launched instance. Defaults to project_id.
- NetworkProjectId string `mapstructure:"network_project_id" required:"false"`
+ // subnetwork to use for launched instance. Defaults to project_id.
+ NetworkProjectId string `mapstructure:"network_project_id" required:"false"`
// If true, the instance will not have an
- // external IP. use_internal_ip must be true if this property is true.
- OmitExternalIP bool `mapstructure:"omit_external_ip" required:"false"`
+ // external IP. use_internal_ip must be true if this property is true.
+ OmitExternalIP bool `mapstructure:"omit_external_ip" required:"false"`
// Sets Host Maintenance Option. Valid
- // choices are MIGRATE and TERMINATE. Please see GCE Instance Scheduling
- // Options,
- // as not all machine_types support MIGRATE (i.e. machines with GPUs). If
- // preemptible is true this can only be TERMINATE. If preemptible is false,
- // it defaults to MIGRATE
- OnHostMaintenance string `mapstructure:"on_host_maintenance" required:"false"`
+ // choices are MIGRATE and TERMINATE. Please see GCE Instance Scheduling
+ // Options,
+ // as not all machine_types support MIGRATE (i.e. machines with GPUs). If
+ // preemptible is true this can only be TERMINATE. If preemptible is false,
+ // it defaults to MIGRATE
+ OnHostMaintenance string `mapstructure:"on_host_maintenance" required:"false"`
// If true, launch a preemptible instance.
- Preemptible bool `mapstructure:"preemptible" required:"false"`
+ Preemptible bool `mapstructure:"preemptible" required:"false"`
// The time to wait for instance state changes.
- // Defaults to "5m".
- RawStateTimeout string `mapstructure:"state_timeout" required:"false"`
+ // Defaults to "5m".
+ RawStateTimeout string `mapstructure:"state_timeout" required:"false"`
// The region in which to launch the instance. Defaults to
- // the region hosting the specified zone.
- Region string `mapstructure:"region" required:"false"`
+ // the region hosting the specified zone.
+ Region string `mapstructure:"region" required:"false"`
// The service account scopes for launched
- // instance. Defaults to:
- Scopes []string `mapstructure:"scopes" required:"false"`
+ // instance. Defaults to:
+ Scopes []string `mapstructure:"scopes" required:"false"`
// The service account to be used for
- // launched instance. Defaults to the project's default service account unless
- // disable_default_service_account is true.
- ServiceAccountEmail string `mapstructure:"service_account_email" required:"false"`
+ // launched instance. Defaults to the project's default service account unless
+ // disable_default_service_account is true.
+ ServiceAccountEmail string `mapstructure:"service_account_email" required:"false"`
// The source image to use to create the new image
- // from. You can also specify source_image_family instead. If both
- // source_image and source_image_family are specified, source_image
- // takes precedence. Example: "debian-8-jessie-v20161027"
- SourceImage string `mapstructure:"source_image" required:"true"`
+ // from. You can also specify source_image_family instead. If both
+ // source_image and source_image_family are specified, source_image
+ // takes precedence. Example: "debian-8-jessie-v20161027"
+ SourceImage string `mapstructure:"source_image" required:"true"`
// The source image family to use to create
- // the new image from. The image family always returns its latest image that
- // is not deprecated. Example: "debian-8".
- SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
+ // the new image from. The image family always returns its latest image that
+ // is not deprecated. Example: "debian-8".
+ SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
// The project ID of the project
- // containing the source image.
- SourceImageProjectId string `mapstructure:"source_image_project_id" required:"false"`
+ // containing the source image.
+ SourceImageProjectId string `mapstructure:"source_image_project_id" required:"false"`
// The path to a startup script to run on the
- // VM from which the image will be made.
- StartupScriptFile string `mapstructure:"startup_script_file" required:"false"`
+ // VM from which the image will be made.
+ StartupScriptFile string `mapstructure:"startup_script_file" required:"false"`
// The Google Compute subnetwork id or URL to use for
- // the launched instance. Only required if the network has been created with
- // custom subnetting. Note, the region of the subnetwork must match the
- // region or zone in which the VM is launched. If the value is not a URL,
- // it will be interpolated to
- // projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))
- Subnetwork string `mapstructure:"subnetwork" required:"false"`
+ // the launched instance. Only required if the network has been created with
+ // custom subnetting. Note, the region of the subnetwork must match the
+ // region or zone in which the VM is launched. If the value is not a URL,
+ // it will be interpolated to
+ // projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))
+ Subnetwork string `mapstructure:"subnetwork" required:"false"`
// Assign network tags to apply firewall rules to
- // VM instance.
- Tags []string `mapstructure:"tags" required:"false"`
+ // VM instance.
+ Tags []string `mapstructure:"tags" required:"false"`
// If true, use the instance's internal IP
- // instead of its external IP during building.
- UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
+ // instead of its external IP during building.
+ UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
// The zone in which to launch the instance used to create
- // the image. Example: "us-central1-a"
- Zone string `mapstructure:"zone" required:"true"`
+ // the image. Example: "us-central1-a"
+ Zone string `mapstructure:"zone" required:"true"`
Account AccountFile
stateTimeout time.Duration
diff --git a/builder/hyperone/config.go b/builder/hyperone/config.go
index ea843eab9..4dfa9d573 100644
--- a/builder/hyperone/config.go
+++ b/builder/hyperone/config.go
@@ -35,59 +35,59 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
// Custom API endpoint URL, compatible with HyperOne.
- // It can also be specified via environment variable HYPERONE_API_URL.
- APIURL string `mapstructure:"api_url" required:"false"`
+ // It can also be specified via environment variable HYPERONE_API_URL.
+ APIURL string `mapstructure:"api_url" required:"false"`
// The authentication token used to access your account.
- // This can be either a session token or a service account token.
- // If not defined, the builder will attempt to find it in the following order:
- Token string `mapstructure:"token" required:"true"`
+ // This can be either a session token or a service account token.
+ // If not defined, the builder will attempt to find it in the following order:
+ Token string `mapstructure:"token" required:"true"`
// The id or name of the project. This field is required
- // only if using session tokens. It should be skipped when using service
- // account authentication.
- Project string `mapstructure:"project" required:"true"`
+ // only if using session tokens. It should be skipped when using service
+ // account authentication.
+ Project string `mapstructure:"project" required:"true"`
// Login (an e-mail) on HyperOne platform. Set this
- // if you want to fetch the token by SSH authentication.
+ // if you want to fetch the token by SSH authentication.
TokenLogin string `mapstructure:"token_login" required:"false"`
// Timeout for waiting on the API to complete
- // a request. Defaults to 5m.
+ // a request. Defaults to 5m.
StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
// ID or name of the image to launch server from.
- SourceImage string `mapstructure:"source_image" required:"true"`
+ SourceImage string `mapstructure:"source_image" required:"true"`
// The name of the resulting image. Defaults to
- // "packer-{{timestamp}}"
- // (see configuration templates for more info).
- ImageName string `mapstructure:"image_name" required:"false"`
+ // "packer-{{timestamp}}"
+ // (see configuration templates for more info).
+ ImageName string `mapstructure:"image_name" required:"false"`
// The description of the resulting image.
- ImageDescription string `mapstructure:"image_description" required:"false"`
+ ImageDescription string `mapstructure:"image_description" required:"false"`
// Key/value pair tags to
- // add to the created image.
- ImageTags map[string]interface{} `mapstructure:"image_tags" required:"false"`
+ // add to the created image.
+ ImageTags map[string]interface{} `mapstructure:"image_tags" required:"false"`
// The service of the resulting image.
- ImageService string `mapstructure:"image_service" required:"false"`
+ ImageService string `mapstructure:"image_service" required:"false"`
// ID or name of the type this server should be created with.
- VmType string `mapstructure:"vm_type" required:"true"`
+ VmType string `mapstructure:"vm_type" required:"true"`
// The name of the created server.
- VmName string `mapstructure:"vm_name" required:"false"`
+ VmName string `mapstructure:"vm_name" required:"false"`
// Key/value pair tags to
- // add to the created server.
+ // add to the created server.
VmTags map[string]interface{} `mapstructure:"vm_tags" required:"false"`
// The name of the created disk.
- DiskName string `mapstructure:"disk_name" required:"false"`
+ DiskName string `mapstructure:"disk_name" required:"false"`
// The type of the created disk. Defaults to ssd.
- DiskType string `mapstructure:"disk_type" required:"false"`
+ DiskType string `mapstructure:"disk_type" required:"false"`
// Size of the created disk, in GiB.
DiskSize float32 `mapstructure:"disk_size" required:"true"`
// The ID of the network to attach to the created server.
- Network string `mapstructure:"network" required:"false"`
+ Network string `mapstructure:"network" required:"false"`
// The ID of the private IP within chosen network
- // that should be assigned to the created server.
- PrivateIP string `mapstructure:"private_ip" required:"false"`
+ // that should be assigned to the created server.
+ PrivateIP string `mapstructure:"private_ip" required:"false"`
// The ID of the public IP that should be assigned to
- // the created server. If network is chosen, the public IP will be associated
- // with server's private IP.
- PublicIP string `mapstructure:"public_ip" required:"false"`
+ // the created server. If network is chosen, the public IP will be associated
+ // with server's private IP.
+ PublicIP string `mapstructure:"public_ip" required:"false"`
// Custom service of public network adapter.
- // Can be useful when using custom api_url. Defaults to public.
+ // Can be useful when using custom api_url. Defaults to public.
PublicNetAdpService string `mapstructure:"public_netadp_service" required:"false"`
ChrootDisk bool `mapstructure:"chroot_disk"`
@@ -103,12 +103,12 @@ type Config struct {
PreMountCommands []string `mapstructure:"pre_mount_commands"`
PostMountCommands []string `mapstructure:"post_mount_commands"`
// List of SSH keys by name or id to be added
- // to the server on launch.
- SSHKeys []string `mapstructure:"ssh_keys" required:"false"`
+ // to the server on launch.
+ SSHKeys []string `mapstructure:"ssh_keys" required:"false"`
// User data to launch with the server. Packer will not
- // automatically wait for a user script to finish before shutting down the
- // instance, this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // automatically wait for a user script to finish before shutting down the
+ // instance, this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/hyperv/common/output_config.go b/builder/hyperv/common/output_config.go
index bd8f68045..fbf9cd7ac 100644
--- a/builder/hyperv/common/output_config.go
+++ b/builder/hyperv/common/output_config.go
@@ -11,12 +11,12 @@ import (
type OutputConfig struct {
// This setting specifies the directory that
- // artifacts from the build, such as the virtual machine files and disks,
- // will be output to. The path to the directory may be relative or
- // absolute. If relative, the path is relative to the working directory
- // packer is executed from. This directory must not exist or, if
- // created, must be empty prior to running the builder. By default this is
- // "output-BUILDNAME" where "BUILDNAME" is the name of the build.
+ // artifacts from the build, such as the virtual machine files and disks,
+ // will be output to. The path to the directory may be relative or
+ // absolute. If relative, the path is relative to the working directory
+ // packer is executed from. This directory must not exist or, if
+ // created, must be empty prior to running the builder. By default this is
+ // "output-BUILDNAME" where "BUILDNAME" is the name of the build.
OutputDir string `mapstructure:"output_directory" required:"false"`
}
diff --git a/builder/hyperv/common/shutdown_config.go b/builder/hyperv/common/shutdown_config.go
index 35d8b8266..ef028ee51 100644
--- a/builder/hyperv/common/shutdown_config.go
+++ b/builder/hyperv/common/shutdown_config.go
@@ -11,18 +11,18 @@ import (
type ShutdownConfig struct {
// The command to use to gracefully shut down
- // the machine once all provisioning is complete. By default this is an
- // empty string, which tells Packer to just forcefully shut down the
- // machine. This setting can be safely omitted if for example, a shutdown
- // command to gracefully halt the machine is configured inside a
- // provisioning script. If one or more scripts require a reboot it is
- // suggested to leave this blank (since reboots may fail) and instead
- // specify the final shutdown command in your last script.
- ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // the machine once all provisioning is complete. By default this is an
+ // empty string, which tells Packer to just forcefully shut down the
+ // machine. This setting can be safely omitted if for example, a shutdown
+ // command to gracefully halt the machine is configured inside a
+ // provisioning script. If one or more scripts require a reboot it is
+ // suggested to leave this blank (since reboots may fail) and instead
+ // specify the final shutdown command in your last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
// The amount of time to wait after executing
- // the shutdown_command for the virtual machine to actually shut down.
- // If the machine doesn't shut down in this time it is considered an
- // error. By default, the time out is "5m" (five minutes).
+ // the shutdown_command for the virtual machine to actually shut down.
+ // If the machine doesn't shut down in this time it is considered an
+ // error. By default, the time out is "5m" (five minutes).
RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
diff --git a/builder/hyperv/iso/builder.go b/builder/hyperv/iso/builder.go
index 29d510a9f..89d78344d 100644
--- a/builder/hyperv/iso/builder.go
+++ b/builder/hyperv/iso/builder.go
@@ -61,131 +61,131 @@ type Config struct {
hypervcommon.SSHConfig `mapstructure:",squash"`
hypervcommon.ShutdownConfig `mapstructure:",squash"`
// The size, in megabytes, of the hard disk to create
- // for the VM. By default, this is 40 GB.
+ // for the VM. By default, this is 40 GB.
DiskSize uint `mapstructure:"disk_size" required:"false"`
// The block size of the VHD to be created.
- // Recommended disk block size for Linux hyper-v guests is 1 MiB. This
- // defaults to "32 MiB".
+ // Recommended disk block size for Linux hyper-v guests is 1 MiB. This
+ // defaults to "32 MiB".
DiskBlockSize uint `mapstructure:"disk_block_size" required:"false"`
// The amount, in megabytes, of RAM to assign to the
- // VM. By default, this is 1 GB.
+ // VM. By default, this is 1 GB.
RamSize uint `mapstructure:"memory" required:"false"`
// A list of ISO paths to
- // attach to a VM when it is booted. This is most useful for unattended
- // Windows installs, which look for an Autounattend.xml file on removable
- // media. By default, no secondary ISO will be attached.
+ // attach to a VM when it is booted. This is most useful for unattended
+ // Windows installs, which look for an Autounattend.xml file on removable
+ // media. By default, no secondary ISO will be attached.
SecondaryDvdImages []string `mapstructure:"secondary_iso_images" required:"false"`
// If set to attach then attach and
- // mount the ISO image specified in guest_additions_path. If set to
- // none then guest additions are not attached and mounted; This is the
- // default.
+ // mount the ISO image specified in guest_additions_path. If set to
+ // none then guest additions are not attached and mounted; This is the
+ // default.
GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
// The path to the ISO image for guest
- // additions.
+ // additions.
GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
// This is the name of the new virtual machine,
- // without the file extension. By default this is "packer-BUILDNAME",
- // where "BUILDNAME" is the name of the build.
+ // without the file extension. By default this is "packer-BUILDNAME",
+ // where "BUILDNAME" is the name of the build.
VMName string `mapstructure:"vm_name" required:"false"`
// The name of the switch to connect the virtual
- // machine to. By default, leaving this value unset will cause Packer to
- // try and determine the switch to use by looking for an external switch
- // that is up and running.
- SwitchName string `mapstructure:"switch_name" required:"false"`
+ // machine to. By default, leaving this value unset will cause Packer to
+ // try and determine the switch to use by looking for an external switch
+ // that is up and running.
+ SwitchName string `mapstructure:"switch_name" required:"false"`
// This is the VLAN of the virtual switch's
- // network card. By default none is set. If none is set then a VLAN is not
- // set on the switch's network card. If this value is set it should match
- // the VLAN specified in by vlan_id.
- SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
+ // network card. By default none is set. If none is set then a VLAN is not
+ // set on the switch's network card. If this value is set it should match
+ // the VLAN specified by vlan_id.
+ SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
// This allows a specific MAC address to be used on
- // the default virtual network card. The MAC address must be a string with
- // no delimiters, for example "0000deadbeef".
- MacAddress string `mapstructure:"mac_address" required:"false"`
+ // the default virtual network card. The MAC address must be a string with
+ // no delimiters, for example "0000deadbeef".
+ MacAddress string `mapstructure:"mac_address" required:"false"`
// This is the VLAN of the virtual machine's network
- // card for the new virtual machine. By default none is set. If none is set
- // then VLANs are not set on the virtual machine's network card.
- VlanId string `mapstructure:"vlan_id" required:"false"`
+ // card for the new virtual machine. By default none is set. If none is set
+ // then VLANs are not set on the virtual machine's network card.
+ VlanId string `mapstructure:"vlan_id" required:"false"`
// The number of CPUs the virtual machine should use. If
- // this isn't specified, the default is 1 CPU.
- Cpu uint `mapstructure:"cpus" required:"false"`
+ // this isn't specified, the default is 1 CPU.
+ Cpu uint `mapstructure:"cpus" required:"false"`
// The Hyper-V generation for the virtual machine. By
- // default, this is 1. Generation 2 Hyper-V virtual machines do not support
- // floppy drives. In this scenario use secondary_iso_images instead. Hard
- // drives and DVD drives will also be SCSI and not IDE.
- Generation uint `mapstructure:"generation" required:"false"`
+ // default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ // floppy drives. In this scenario use secondary_iso_images instead. Hard
+ // drives and DVD drives will also be SCSI and not IDE.
+ Generation uint `mapstructure:"generation" required:"false"`
// If true enable MAC address spoofing
- // for the virtual machine. This defaults to false.
- EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
+ // for the virtual machine. This defaults to false.
+ EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
// If true use a legacy network adapter as the NIC.
- // This defaults to false. A legacy network adapter is fully emulated NIC, and is thus
- // supported by various exotic operating systems, but this emulation requires
- // additional overhead and should only be used if absolutely necessary.
- UseLegacyNetworkAdapter bool `mapstructure:"use_legacy_network_adapter" required:"false"`
+ // This defaults to false. A legacy network adapter is a fully emulated NIC, and is thus
+ // supported by various exotic operating systems, but this emulation requires
+ // additional overhead and should only be used if absolutely necessary.
+ UseLegacyNetworkAdapter bool `mapstructure:"use_legacy_network_adapter" required:"false"`
// If true enable dynamic memory for
- // the virtual machine. This defaults to false.
- EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
+ // the virtual machine. This defaults to false.
+ EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
// If true enable secure boot for the
- // virtual machine. This defaults to false. See secure_boot_template
- // below for additional settings.
- EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
+ // virtual machine. This defaults to false. See secure_boot_template
+ // below for additional settings.
+ EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
// The secure boot template to be
- // configured. Valid values are "MicrosoftWindows" (Windows) or
- // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
- // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
- SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
+ // configured. Valid values are "MicrosoftWindows" (Windows) or
+ // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+ SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
// If true enable
- // virtualization extensions for the virtual machine. This defaults to
- // false. For nested virtualization you need to enable MAC spoofing,
- // disable dynamic memory and have at least 4GB of RAM assigned to the
- // virtual machine.
- EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
+ // virtualization extensions for the virtual machine. This defaults to
+ // false. For nested virtualization you need to enable MAC spoofing,
+ // disable dynamic memory and have at least 4GB of RAM assigned to the
+ // virtual machine.
+ EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
// The location under which Packer will create a
- // directory to house all the VM files and folders during the build.
- // By default %TEMP% is used which, for most systems, will evaluate to
- // %USERPROFILE%/AppData/Local/Temp.
- TempPath string `mapstructure:"temp_path" required:"false"`
+ // directory to house all the VM files and folders during the build.
+ // By default %TEMP% is used which, for most systems, will evaluate to
+ // %USERPROFILE%/AppData/Local/Temp.
+ TempPath string `mapstructure:"temp_path" required:"false"`
// This allows you to set the vm version when
- // calling New-VM to generate the vm.
- Version string `mapstructure:"configuration_version" required:"false"`
+ // calling New-VM to generate the vm.
+ Version string `mapstructure:"configuration_version" required:"false"`
// If "true", Packer will not delete the VM from
- // The Hyper-V manager.
- KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // The Hyper-V manager.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
Communicator string `mapstructure:"communicator"`
// The size or sizes of any
- // additional hard disks for the VM in megabytes. If this is not specified
- // then the VM will only contain a primary hard disk. Additional drives
- // will be attached to the SCSI interface only. The builder uses
- // expandable rather than fixed-size virtual hard disks, so the actual
- // file representing the disk will not use the full size unless it is
- // full.
+ // additional hard disks for the VM in megabytes. If this is not specified
+ // then the VM will only contain a primary hard disk. Additional drives
+ // will be attached to the SCSI interface only. The builder uses
+ // expandable rather than fixed-size virtual hard disks, so the actual
+ // file representing the disk will not use the full size unless it is
+ // full.
AdditionalDiskSize []uint `mapstructure:"disk_additional_size" required:"false"`
// If true skip compacting the hard disk for
- // the virtual machine when exporting. This defaults to false.
+ // the virtual machine when exporting. This defaults to false.
SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// If true Packer will skip the export of the VM.
- // If you are interested only in the VHD/VHDX files, you can enable this
- // option. The resulting VHD/VHDX file will be output to
- // /Virtual Hard Disks. By default this option is false
- // and Packer will export the VM to output_directory.
+ // If you are interested only in the VHD/VHDX files, you can enable this
+ // option. The resulting VHD/VHDX file will be output to
+ // /Virtual Hard Disks. By default this option is false
+ // and Packer will export the VM to output_directory.
SkipExport bool `mapstructure:"skip_export" required:"false"`
// If true enables differencing disks. Only
- // the changes will be written to the new disk. This is especially useful if
- // your source is a VHD/VHDX. This defaults to false.
+ // the changes will be written to the new disk. This is especially useful if
+ // your source is a VHD/VHDX. This defaults to false.
DifferencingDisk bool `mapstructure:"differencing_disk" required:"false"`
// If true, creates the boot disk on the
- // virtual machine as a fixed VHD format disk. The default is false, which
- // creates a dynamic VHDX format disk. This option requires setting
- // generation to 1, skip_compaction to true, and
- // differencing_disk to false. Additionally, any value entered for
- // disk_block_size will be ignored. The most likely use case for this
- // option is outputing a disk that is in the format required for upload to
- // Azure.
+ // virtual machine as a fixed VHD format disk. The default is false, which
+ // creates a dynamic VHDX format disk. This option requires setting
+ // generation to 1, skip_compaction to true, and
+ // differencing_disk to false. Additionally, any value entered for
+ // disk_block_size will be ignored. The most likely use case for this
+ // option is outputting a disk that is in the format required for upload to
+ // Azure.
FixedVHD bool `mapstructure:"use_fixed_vhd_format" required:"false"`
// Packer defaults to building Hyper-V virtual
- // machines by launching a GUI that shows the console of the machine being
- // built. When this value is set to true, the machine will start without a
- // console.
+ // machines by launching a GUI that shows the console of the machine being
+ // built. When this value is set to true, the machine will start without a
+ // console.
Headless bool `mapstructure:"headless" required:"false"`
ctx interpolate.Context
diff --git a/builder/hyperv/vmcx/builder.go b/builder/hyperv/vmcx/builder.go
index b8570bf76..03e0e2cd6 100644
--- a/builder/hyperv/vmcx/builder.go
+++ b/builder/hyperv/vmcx/builder.go
@@ -51,20 +51,20 @@ type Config struct {
hypervcommon.SSHConfig `mapstructure:",squash"`
hypervcommon.ShutdownConfig `mapstructure:",squash"`
// The amount, in megabytes, of RAM to assign to the
- // VM. By default, this is 1 GB.
+ // VM. By default, this is 1 GB.
RamSize uint `mapstructure:"memory" required:"false"`
// A list of ISO paths to
- // attach to a VM when it is booted. This is most useful for unattended
- // Windows installs, which look for an Autounattend.xml file on removable
- // media. By default, no secondary ISO will be attached.
+ // attach to a VM when it is booted. This is most useful for unattended
+ // Windows installs, which look for an Autounattend.xml file on removable
+ // media. By default, no secondary ISO will be attached.
SecondaryDvdImages []string `mapstructure:"secondary_iso_images" required:"false"`
// If set to attach then attach and
- // mount the ISO image specified in guest_additions_path. If set to
- // none then guest additions are not attached and mounted; This is the
- // default.
+ // mount the ISO image specified in guest_additions_path. If set to
+ // none then guest additions are not attached and mounted; This is the
+ // default.
GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
// The path to the ISO image for guest
- // additions.
+ // additions.
GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
// This is the path to a directory containing an exported virtual machine.
@@ -73,100 +73,100 @@ type Config struct {
// This is the name of the virtual machine to clone from.
CloneFromVMName string `mapstructure:"clone_from_vm_name"`
// The name of a snapshot in the
- // source machine to use as a starting point for the clone. If the value
- // given is an empty string, the last snapshot present in the source will
- // be chosen as the starting point for the new VM.
+ // source machine to use as a starting point for the clone. If the value
+ // given is an empty string, the last snapshot present in the source will
+ // be chosen as the starting point for the new VM.
CloneFromSnapshotName string `mapstructure:"clone_from_snapshot_name" required:"false"`
// If set to true all snapshots
- // present in the source machine will be copied when the machine is
- // cloned. The final result of the build will be an exported virtual
- // machine that contains all the snapshots of the parent.
+ // present in the source machine will be copied when the machine is
+ // cloned. The final result of the build will be an exported virtual
+ // machine that contains all the snapshots of the parent.
CloneAllSnapshots bool `mapstructure:"clone_all_snapshots" required:"false"`
// This is the name of the new virtual machine,
- // without the file extension. By default this is "packer-BUILDNAME",
- // where "BUILDNAME" is the name of the build.
+ // without the file extension. By default this is "packer-BUILDNAME",
+ // where "BUILDNAME" is the name of the build.
VMName string `mapstructure:"vm_name" required:"false"`
// If true enables differencing disks. Only
- // the changes will be written to the new disk. This is especially useful if
- // your source is a VHD/VHDX. This defaults to false.
+ // the changes will be written to the new disk. This is especially useful if
+ // your source is a VHD/VHDX. This defaults to false.
DifferencingDisk bool `mapstructure:"differencing_disk" required:"false"`
// The name of the switch to connect the virtual
- // machine to. By default, leaving this value unset will cause Packer to
- // try and determine the switch to use by looking for an external switch
- // that is up and running.
- SwitchName string `mapstructure:"switch_name" required:"false"`
+ // machine to. By default, leaving this value unset will cause Packer to
+ // try and determine the switch to use by looking for an external switch
+ // that is up and running.
+ SwitchName string `mapstructure:"switch_name" required:"false"`
// When cloning a vm to build from, we run a powershell
- // Compare-VM command, which, depending on your version of Windows, may need
- // the "Copy" flag to be set to true or false. Defaults to "false". Command:
- CompareCopy bool `mapstructure:"copy_in_compare" required:"false"`
+ // Compare-VM command, which, depending on your version of Windows, may need
+ // the "Copy" flag to be set to true or false. Defaults to "false". Command:
+ CompareCopy bool `mapstructure:"copy_in_compare" required:"false"`
// This is the VLAN of the virtual switch's
- // network card. By default none is set. If none is set then a VLAN is not
- // set on the switch's network card. If this value is set it should match
- // the VLAN specified in by vlan_id.
- SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
+ // network card. By default none is set. If none is set then a VLAN is not
+ // set on the switch's network card. If this value is set it should match
+ // the VLAN specified by vlan_id.
+ SwitchVlanId string `mapstructure:"switch_vlan_id" required:"false"`
// This allows a specific MAC address to be used on
- // the default virtual network card. The MAC address must be a string with
- // no delimiters, for example "0000deadbeef".
- MacAddress string `mapstructure:"mac_address" required:"false"`
+ // the default virtual network card. The MAC address must be a string with
+ // no delimiters, for example "0000deadbeef".
+ MacAddress string `mapstructure:"mac_address" required:"false"`
// This is the VLAN of the virtual machine's network
- // card for the new virtual machine. By default none is set. If none is set
- // then VLANs are not set on the virtual machine's network card.
- VlanId string `mapstructure:"vlan_id" required:"false"`
+ // card for the new virtual machine. By default none is set. If none is set
+ // then VLANs are not set on the virtual machine's network card.
+ VlanId string `mapstructure:"vlan_id" required:"false"`
// The number of CPUs the virtual machine should use. If
- // this isn't specified, the default is 1 CPU.
- Cpu uint `mapstructure:"cpus" required:"false"`
+ // this isn't specified, the default is 1 CPU.
+ Cpu uint `mapstructure:"cpus" required:"false"`
// The Hyper-V generation for the virtual machine. By
- // default, this is 1. Generation 2 Hyper-V virtual machines do not support
- // floppy drives. In this scenario use secondary_iso_images instead. Hard
- // drives and DVD drives will also be SCSI and not IDE.
- Generation uint `mapstructure:"generation" required:"false"`
+ // default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ // floppy drives. In this scenario use secondary_iso_images instead. Hard
+ // drives and DVD drives will also be SCSI and not IDE.
+ Generation uint `mapstructure:"generation" required:"false"`
// If true enable MAC address spoofing
- // for the virtual machine. This defaults to false.
- EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
+ // for the virtual machine. This defaults to false.
+ EnableMacSpoofing bool `mapstructure:"enable_mac_spoofing" required:"false"`
// If true enable dynamic memory for
- // the virtual machine. This defaults to false.
- EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
+ // the virtual machine. This defaults to false.
+ EnableDynamicMemory bool `mapstructure:"enable_dynamic_memory" required:"false"`
// If true enable secure boot for the
- // virtual machine. This defaults to false. See secure_boot_template
- // below for additional settings.
- EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
+ // virtual machine. This defaults to false. See secure_boot_template
+ // below for additional settings.
+ EnableSecureBoot bool `mapstructure:"enable_secure_boot" required:"false"`
// The secure boot template to be
- // configured. Valid values are "MicrosoftWindows" (Windows) or
- // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
- // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
- SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
+ // configured. Valid values are "MicrosoftWindows" (Windows) or
+ // "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ // enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+ SecureBootTemplate string `mapstructure:"secure_boot_template" required:"false"`
// If true enable
- // virtualization extensions for the virtual machine. This defaults to
- // false. For nested virtualization you need to enable MAC spoofing,
- // disable dynamic memory and have at least 4GB of RAM assigned to the
- // virtual machine.
- EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
+ // virtualization extensions for the virtual machine. This defaults to
+ // false. For nested virtualization you need to enable MAC spoofing,
+ // disable dynamic memory and have at least 4GB of RAM assigned to the
+ // virtual machine.
+ EnableVirtualizationExtensions bool `mapstructure:"enable_virtualization_extensions" required:"false"`
// The location under which Packer will create a
- // directory to house all the VM files and folders during the build.
- // By default %TEMP% is used which, for most systems, will evaluate to
- // %USERPROFILE%/AppData/Local/Temp.
- TempPath string `mapstructure:"temp_path" required:"false"`
+ // directory to house all the VM files and folders during the build.
+ // By default %TEMP% is used which, for most systems, will evaluate to
+ // %USERPROFILE%/AppData/Local/Temp.
+ TempPath string `mapstructure:"temp_path" required:"false"`
// This allows you to set the vm version when
- // calling New-VM to generate the vm.
- Version string `mapstructure:"configuration_version" required:"false"`
+ // calling New-VM to generate the vm.
+ Version string `mapstructure:"configuration_version" required:"false"`
// If "true", Packer will not delete the VM from
- // The Hyper-V manager.
- KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // The Hyper-V manager.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
Communicator string `mapstructure:"communicator"`
// If true skip compacting the hard disk for
- // the virtual machine when exporting. This defaults to false.
+ // the virtual machine when exporting. This defaults to false.
SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// If true Packer will skip the export of the VM.
- // If you are interested only in the VHD/VHDX files, you can enable this
- // option. The resulting VHD/VHDX file will be output to
- // /Virtual Hard Disks. By default this option is false
- // and Packer will export the VM to output_directory.
+ // If you are interested only in the VHD/VHDX files, you can enable this
+ // option. The resulting VHD/VHDX file will be output to
+ // /Virtual Hard Disks. By default this option is false
+ // and Packer will export the VM to output_directory.
SkipExport bool `mapstructure:"skip_export" required:"false"`
// Packer defaults to building Hyper-V virtual
- // machines by launching a GUI that shows the console of the machine being
- // built. When this value is set to true, the machine will start without a
- // console.
+ // machines by launching a GUI that shows the console of the machine being
+ // built. When this value is set to true, the machine will start without a
+ // console.
Headless bool `mapstructure:"headless" required:"false"`
ctx interpolate.Context
diff --git a/builder/lxc/config.go b/builder/lxc/config.go
index ea9f1978c..d5fa44ed3 100644
--- a/builder/lxc/config.go
+++ b/builder/lxc/config.go
@@ -17,52 +17,52 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
// The path to the lxc configuration file.
- ConfigFile string `mapstructure:"config_file" required:"true"`
+ ConfigFile string `mapstructure:"config_file" required:"true"`
// The directory in which to save the exported
- // tar.gz. Defaults to output- in the current directory.
- OutputDir string `mapstructure:"output_directory" required:"false"`
+ // tar.gz. Defaults to output- in the current directory.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
// The name of the LXC container. Usually stored
- // in /var/lib/lxc/containers/. Defaults to
- // packer-.
- ContainerName string `mapstructure:"container_name" required:"false"`
+ // in /var/lib/lxc/containers/. Defaults to
+ // packer-.
+ ContainerName string `mapstructure:"container_name" required:"false"`
// Allows you to specify a wrapper command, such
- // as ssh so you can execute packer builds on a remote host. Defaults to
- // Empty.
- CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // as ssh so you can execute packer builds on a remote host. Defaults to
+ // Empty.
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
// The timeout in seconds to wait for the the
- // container to start. Defaults to 20 seconds.
- RawInitTimeout string `mapstructure:"init_timeout" required:"false"`
+ // container to start. Defaults to 20 seconds.
+ RawInitTimeout string `mapstructure:"init_timeout" required:"false"`
// Options to pass to lxc-create. For
- // instance, you can specify a custom LXC container configuration file with
- // ["-f", "/path/to/lxc.conf"]. Defaults to []. See man 1 lxc-create for
- // available options.
- CreateOptions []string `mapstructure:"create_options" required:"false"`
+ // instance, you can specify a custom LXC container configuration file with
+ // ["-f", "/path/to/lxc.conf"]. Defaults to []. See man 1 lxc-create for
+ // available options.
+ CreateOptions []string `mapstructure:"create_options" required:"false"`
// Options to pass to lxc-start. For
- // instance, you can override parameters from the LXC container configuration
- // file via ["--define", "KEY=VALUE"]. Defaults to []. See
- // man 1 lxc-start for available options.
- StartOptions []string `mapstructure:"start_options" required:"false"`
+ // instance, you can override parameters from the LXC container configuration
+ // file via ["--define", "KEY=VALUE"]. Defaults to []. See
+ // man 1 lxc-start for available options.
+ StartOptions []string `mapstructure:"start_options" required:"false"`
// Options to pass to lxc-attach. For
- // instance, you can prevent the container from inheriting the host machine's
- // environment by specifying ["--clear-env"]. Defaults to []. See
- // man 1 lxc-attach for available options.
- AttachOptions []string `mapstructure:"attach_options" required:"false"`
+ // instance, you can prevent the container from inheriting the host machine's
+ // environment by specifying ["--clear-env"]. Defaults to []. See
+ // man 1 lxc-attach for available options.
+ AttachOptions []string `mapstructure:"attach_options" required:"false"`
// The LXC template name to use.
- Name string `mapstructure:"template_name" required:"true"`
+ Name string `mapstructure:"template_name" required:"true"`
// Options to pass to the given
- // lxc-template command, usually located in
- // /usr/share/lxc/templates/lxc-. Note: This gets passed as
- // ARGV to the template command. Ensure you have an array of strings, as a
- // single string with spaces probably won't work. Defaults to [].
- Parameters []string `mapstructure:"template_parameters" required:"false"`
+ // lxc-template command, usually located in
+ // /usr/share/lxc/templates/lxc-. Note: This gets passed as
+ // ARGV to the template command. Ensure you have an array of strings, as a
+ // single string with spaces probably won't work. Defaults to [].
+ Parameters []string `mapstructure:"template_parameters" required:"false"`
// Environmental variables to
- // use to build the template with.
- EnvVars []string `mapstructure:"template_environment_vars" required:"true"`
+ // use to build the template with.
+ EnvVars []string `mapstructure:"template_environment_vars" required:"true"`
// The minimum run level to wait for the
- // container to reach. Note some distributions (Ubuntu) simulate run levels
- // and may report 5 rather than 3.
- TargetRunlevel int `mapstructure:"target_runlevel" required:"false"`
- InitTimeout time.Duration
+ // container to reach. Note some distributions (Ubuntu) simulate run levels
+ // and may report 5 rather than 3.
+ TargetRunlevel int `mapstructure:"target_runlevel" required:"false"`
+ InitTimeout time.Duration
ctx interpolate.Context
}
diff --git a/builder/lxd/config.go b/builder/lxd/config.go
index 4f67bf0ca..b127f7aaf 100644
--- a/builder/lxd/config.go
+++ b/builder/lxd/config.go
@@ -15,29 +15,29 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
// The name of the output artifact. Defaults to
- // name.
- OutputImage string `mapstructure:"output_image" required:"false"`
- ContainerName string `mapstructure:"container_name"`
+ // name.
+ OutputImage string `mapstructure:"output_image" required:"false"`
+ ContainerName string `mapstructure:"container_name"`
// Lets you prefix all builder commands, such as
- // with ssh for a remote build host. Defaults to "".
- CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
+ // with ssh for a remote build host. Defaults to "".
+ CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
// The source image to use when creating the build
- // container. This can be a (local or remote) image (name or fingerprint).
- // E.G. my-base-image, ubuntu-daily:x, 08fababf6f27, ...
- Image string `mapstructure:"image" required:"true"`
- Profile string `mapstructure:"profile"`
+ // container. This can be a (local or remote) image (name or fingerprint).
+ // E.G. my-base-image, ubuntu-daily:x, 08fababf6f27, ...
+ Image string `mapstructure:"image" required:"true"`
+ Profile string `mapstructure:"profile"`
// The number of seconds to sleep between launching
- // the LXD instance and provisioning it; defaults to 3 seconds.
- InitSleep string `mapstructure:"init_sleep" required:"false"`
+ // the LXD instance and provisioning it; defaults to 3 seconds.
+ InitSleep string `mapstructure:"init_sleep" required:"false"`
// Pass key values to the publish
- // step to be set as properties on the output image. This is most helpful to
- // set the description, but can be used to set anything needed. See
- // https://stgraber.org/2016/03/30/lxd-2-0-image-management-512/
- // for more properties.
- PublishProperties map[string]string `mapstructure:"publish_properties" required:"false"`
+ // step to be set as properties on the output image. This is most helpful to
+ // set the description, but can be used to set anything needed. See
+ // https://stgraber.org/2016/03/30/lxd-2-0-image-management-512/
+ // for more properties.
+ PublishProperties map[string]string `mapstructure:"publish_properties" required:"false"`
// List of key/value pairs you wish to
- // pass to lxc launch via --config. Defaults to empty.
- LaunchConfig map[string]string `mapstructure:"launch_config" required:"false"`
+ // pass to lxc launch via --config. Defaults to empty.
+ LaunchConfig map[string]string `mapstructure:"launch_config" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/ncloud/config.go b/builder/ncloud/config.go
index f2ee13eef..15e531db8 100644
--- a/builder/ncloud/config.go
+++ b/builder/ncloud/config.go
@@ -18,40 +18,40 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
- AccessKey string `mapstructure:"access_key"`
- SecretKey string `mapstructure:"secret_key"`
+ AccessKey string `mapstructure:"access_key"`
+ SecretKey string `mapstructure:"secret_key"`
// Product code of an image to create.
- // (member_server_image_no is required if not specified)
- ServerImageProductCode string `mapstructure:"server_image_product_code" required:"true"`
+ // (member_server_image_no is required if not specified)
+ ServerImageProductCode string `mapstructure:"server_image_product_code" required:"true"`
// Product (spec) code to create.
- ServerProductCode string `mapstructure:"server_product_code" required:"true"`
+ ServerProductCode string `mapstructure:"server_product_code" required:"true"`
// Previous image code. If there is an
- // image previously created, it can be used to create a new image.
- // (server_image_product_code is required if not specified)
- MemberServerImageNo string `mapstructure:"member_server_image_no" required:"false"`
+ // image previously created, it can be used to create a new image.
+ // (server_image_product_code is required if not specified)
+ MemberServerImageNo string `mapstructure:"member_server_image_no" required:"false"`
// Name of an image to create.
- ServerImageName string `mapstructure:"server_image_name" required:"false"`
+ ServerImageName string `mapstructure:"server_image_name" required:"false"`
// Description of an image to create.
- ServerImageDescription string `mapstructure:"server_image_description" required:"false"`
+ ServerImageDescription string `mapstructure:"server_image_description" required:"false"`
// User data to apply when launching the instance. Note
- // that you need to be careful about escaping characters due to the templates
- // being JSON. It is often more convenient to use user_data_file, instead.
- // Packer will not automatically wait for a user script to finish before
- // shutting down the instance this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance; this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the instance.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// You can add block storage ranging from 10
- // GB to 2000 GB, in increments of 10 GB.
- BlockStorageSize int `mapstructure:"block_storage_size" required:"false"`
+ // GB to 2000 GB, in increments of 10 GB.
+ BlockStorageSize int `mapstructure:"block_storage_size" required:"false"`
// Name of the region where you want to create an image.
- // (default: Korea)
- Region string `mapstructure:"region" required:"false"`
+ // (default: Korea)
+ Region string `mapstructure:"region" required:"false"`
// This is used to allow
- // winrm access when you create a Windows server. An ACG that specifies an
- // access source (0.0.0.0/0) and allowed port (5985) must be created in
- // advance.
+ // winrm access when you create a Windows server. An ACG that specifies an
+ // access source (0.0.0.0/0) and allowed port (5985) must be created in
+ // advance.
AccessControlGroupConfigurationNo string `mapstructure:"access_control_group_configuration_no" required:"false"`
Comm communicator.Config `mapstructure:",squash"`
diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go
index 3bfac129a..563ab89fa 100644
--- a/builder/openstack/access_config.go
+++ b/builder/openstack/access_config.go
@@ -19,75 +19,75 @@ import (
// AccessConfig is for common configuration related to openstack access
type AccessConfig struct {
// The username or id used to connect to
- // the OpenStack service. If not specified, Packer will use the environment
- // variable OS_USERNAME or OS_USERID, if set. This is not required if
- // using access token or application credential instead of password, or if using
- // cloud.yaml.
- Username string `mapstructure:"username" required:"true"`
- UserID string `mapstructure:"user_id"`
+ // the OpenStack service. If not specified, Packer will use the environment
+ // variable OS_USERNAME or OS_USERID, if set. This is not required if
+ // using access token or application credential instead of password, or if using
+ // cloud.yaml.
+ Username string `mapstructure:"username" required:"true"`
+ UserID string `mapstructure:"user_id"`
// The password used to connect to the OpenStack
- // service. If not specified, Packer will use the environment variables
- // OS_PASSWORD, if set. This is not required if using access token or
- // application credential instead of password, or if using cloud.yaml.
- Password string `mapstructure:"password" required:"true"`
+ // service. If not specified, Packer will use the environment variables
+ // OS_PASSWORD, if set. This is not required if using access token or
+ // application credential instead of password, or if using cloud.yaml.
+ Password string `mapstructure:"password" required:"true"`
// The URL to the OpenStack Identity service.
- // If not specified, Packer will use the environment variables OS_AUTH_URL,
- // if set. This is not required if using cloud.yaml.
- IdentityEndpoint string `mapstructure:"identity_endpoint" required:"true"`
+ // If not specified, Packer will use the environment variables OS_AUTH_URL,
+ // if set. This is not required if using cloud.yaml.
+ IdentityEndpoint string `mapstructure:"identity_endpoint" required:"true"`
// The tenant ID or name to boot the
- // instance into. Some OpenStack installations require this. If not specified,
- // Packer will use the environment variable OS_TENANT_NAME or
- // OS_TENANT_ID, if set. Tenant is also called Project in later versions of
- // OpenStack.
- TenantID string `mapstructure:"tenant_id" required:"false"`
- TenantName string `mapstructure:"tenant_name"`
- DomainID string `mapstructure:"domain_id"`
+ // instance into. Some OpenStack installations require this. If not specified,
+ // Packer will use the environment variable OS_TENANT_NAME or
+ // OS_TENANT_ID, if set. Tenant is also called Project in later versions of
+ // OpenStack.
+ TenantID string `mapstructure:"tenant_id" required:"false"`
+ TenantName string `mapstructure:"tenant_name"`
+ DomainID string `mapstructure:"domain_id"`
// The Domain name or ID you are
- // authenticating with. OpenStack installations require this if identity v3 is
- // used. Packer will use the environment variable OS_DOMAIN_NAME or
- // OS_DOMAIN_ID, if set.
- DomainName string `mapstructure:"domain_name" required:"false"`
+ // authenticating with. OpenStack installations require this if identity v3 is
+ // used. Packer will use the environment variable OS_DOMAIN_NAME or
+ // OS_DOMAIN_ID, if set.
+ DomainName string `mapstructure:"domain_name" required:"false"`
// Whether or not the connection to OpenStack can be
- // done over an insecure connection. By default this is false.
- Insecure bool `mapstructure:"insecure" required:"false"`
+ // done over an insecure connection. By default this is false.
+ Insecure bool `mapstructure:"insecure" required:"false"`
// The name of the region, such as "DFW", in which to
- // launch the server to create the image. If not specified, Packer will use
- // the environment variable OS_REGION_NAME, if set.
- Region string `mapstructure:"region" required:"false"`
+ // launch the server to create the image. If not specified, Packer will use
+ // the environment variable OS_REGION_NAME, if set.
+ Region string `mapstructure:"region" required:"false"`
// The endpoint type to use. Can be any of
- // "internal", "internalURL", "admin", "adminURL", "public", and "publicURL".
- // By default this is "public".
- EndpointType string `mapstructure:"endpoint_type" required:"false"`
+ // "internal", "internalURL", "admin", "adminURL", "public", and "publicURL".
+ // By default this is "public".
+ EndpointType string `mapstructure:"endpoint_type" required:"false"`
// Custom CA certificate file path. If omitted the
- // OS_CACERT environment variable can be used.
- CACertFile string `mapstructure:"cacert" required:"false"`
+ // OS_CACERT environment variable can be used.
+ CACertFile string `mapstructure:"cacert" required:"false"`
// Client certificate file path for SSL client
- // authentication. If omitted the OS_CERT environment variable can be used.
- ClientCertFile string `mapstructure:"cert" required:"false"`
+ // authentication. If omitted the OS_CERT environment variable can be used.
+ ClientCertFile string `mapstructure:"cert" required:"false"`
// Client private key file path for SSL client
- // authentication. If omitted the OS_KEY environment variable can be used.
- ClientKeyFile string `mapstructure:"key" required:"false"`
+ // authentication. If omitted the OS_KEY environment variable can be used.
+ ClientKeyFile string `mapstructure:"key" required:"false"`
// the token (id) to use with token based authorization.
- // Packer will use the environment variable OS_TOKEN, if set.
- Token string `mapstructure:"token" required:"false"`
+ // Packer will use the environment variable OS_TOKEN, if set.
+ Token string `mapstructure:"token" required:"false"`
// The application credential name to
- // use with application credential based authorization. Packer will use the
- // environment variable OS_APPLICATION_CREDENTIAL_NAME, if set.
- ApplicationCredentialName string `mapstructure:"application_credential_name" required:"false"`
+ // use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_NAME, if set.
+ ApplicationCredentialName string `mapstructure:"application_credential_name" required:"false"`
// The application credential id to
- // use with application credential based authorization. Packer will use the
- // environment variable OS_APPLICATION_CREDENTIAL_ID, if set.
- ApplicationCredentialID string `mapstructure:"application_credential_id" required:"false"`
+ // use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_ID, if set.
+ ApplicationCredentialID string `mapstructure:"application_credential_id" required:"false"`
// The application credential secret
- // to use with application credential based authorization. Packer will use the
- // environment variable OS_APPLICATION_CREDENTIAL_SECRET, if set.
+ // to use with application credential based authorization. Packer will use the
+ // environment variable OS_APPLICATION_CREDENTIAL_SECRET, if set.
ApplicationCredentialSecret string `mapstructure:"application_credential_secret" required:"false"`
// An entry in a clouds.yaml file. See the OpenStack
- // os-client-config
- // documentation
- // for more information about clouds.yaml files. If omitted, the OS_CLOUD
- // environment variable is used.
- Cloud string `mapstructure:"cloud" required:"false"`
+ // os-client-config
+ // documentation
+ // for more information about clouds.yaml files. If omitted, the OS_CLOUD
+ // environment variable is used.
+ Cloud string `mapstructure:"cloud" required:"false"`
osClient *gophercloud.ProviderClient
}
diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go
index dc9fb1ed5..57362fa3b 100644
--- a/builder/openstack/image_config.go
+++ b/builder/openstack/image_config.go
@@ -13,23 +13,23 @@ import (
// ImageConfig is for common configuration related to creating Images.
type ImageConfig struct {
// The name of the resulting image.
- ImageName string `mapstructure:"image_name" required:"true"`
+ ImageName string `mapstructure:"image_name" required:"true"`
// Glance metadata that will be
- // applied to the image.
- ImageMetadata map[string]string `mapstructure:"metadata" required:"false"`
+ // applied to the image.
+ ImageMetadata map[string]string `mapstructure:"metadata" required:"false"`
// One of "public", "private", "shared", or
- // "community".
+ // "community".
ImageVisibility imageservice.ImageVisibility `mapstructure:"image_visibility" required:"false"`
// List of members to add to the image
- // after creation. An image member is usually a project (also called the
- // "tenant") with whom the image is shared.
- ImageMembers []string `mapstructure:"image_members" required:"false"`
+ // after creation. An image member is usually a project (also called the
+ // "tenant") with whom the image is shared.
+ ImageMembers []string `mapstructure:"image_members" required:"false"`
// Disk format of the resulting image. This
- // option works if use_blockstorage_volume is true.
- ImageDiskFormat string `mapstructure:"image_disk_format" required:"false"`
+ // option works if use_blockstorage_volume is true.
+ ImageDiskFormat string `mapstructure:"image_disk_format" required:"false"`
// List of tags to add to the image after
- // creation.
- ImageTags []string `mapstructure:"image_tags" required:"false"`
+ // creation.
+ ImageTags []string `mapstructure:"image_tags" required:"false"`
}
func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go
index 1f7cdd40b..433e9b165 100644
--- a/builder/openstack/run_config.go
+++ b/builder/openstack/run_config.go
@@ -17,120 +17,120 @@ import (
type RunConfig struct {
Comm communicator.Config `mapstructure:",squash"`
// The ID or full URL to the base image to use. This
- // is the image that will be used to launch a new server and provision it.
- // Unless you specify completely custom SSH settings, the source image must
- // have cloud-init installed so that the keypair gets assigned properly.
- SourceImage string `mapstructure:"source_image" required:"true"`
+ // is the image that will be used to launch a new server and provision it.
+ // Unless you specify completely custom SSH settings, the source image must
+ // have cloud-init installed so that the keypair gets assigned properly.
+ SourceImage string `mapstructure:"source_image" required:"true"`
// The name of the base image to use. This is
- // an alternative way of providing source_image and only either of them can
- // be specified.
- SourceImageName string `mapstructure:"source_image_name" required:"true"`
+ // an alternative way of providing source_image and only either of them can
+ // be specified.
+ SourceImageName string `mapstructure:"source_image_name" required:"true"`
// The search filters for determining the base
- // image to use. This is an alternative way of providing source_image and
- // only one of these methods can be used. source_image will override the
- // filters.
- SourceImageFilters ImageFilter `mapstructure:"source_image_filter" required:"true"`
+ // image to use. This is an alternative way of providing source_image and
+ // only one of these methods can be used. source_image will override the
+ // filters.
+ SourceImageFilters ImageFilter `mapstructure:"source_image_filter" required:"true"`
// The ID, name, or full URL for the desired flavor for
- // the server to be created.
- Flavor string `mapstructure:"flavor" required:"true"`
+ // the server to be created.
+ Flavor string `mapstructure:"flavor" required:"true"`
// The availability zone to launch the server
- // in. If this isn't specified, the default enforced by your OpenStack cluster
- // will be used. This may be required for some OpenStack clusters.
- AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
+ // in. If this isn't specified, the default enforced by your OpenStack cluster
+ // will be used. This may be required for some OpenStack clusters.
+ AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
// For rackspace, whether or not to wait for
- // Rackconnect to assign the machine an IP address before connecting via SSH.
- // Defaults to false.
- RackconnectWait bool `mapstructure:"rackconnect_wait" required:"false"`
+ // Rackconnect to assign the machine an IP address before connecting via SSH.
+ // Defaults to false.
+ RackconnectWait bool `mapstructure:"rackconnect_wait" required:"false"`
// The ID or name of an external network that
- // can be used for creation of a new floating IP.
- FloatingIPNetwork string `mapstructure:"floating_ip_network" required:"false"`
+ // can be used for creation of a new floating IP.
+ FloatingIPNetwork string `mapstructure:"floating_ip_network" required:"false"`
// A specific floating IP to assign to this instance.
- FloatingIP string `mapstructure:"floating_ip" required:"false"`
+ FloatingIP string `mapstructure:"floating_ip" required:"false"`
// Whether or not to attempt to reuse existing
- // unassigned floating ips in the project before allocating a new one. Note
- // that it is not possible to safely do this concurrently, so if you are
- // running multiple openstack builds concurrently, or if other processes are
- // assigning and using floating IPs in the same openstack project while packer
- // is running, you should not set this to true. Defaults to false.
- ReuseIPs bool `mapstructure:"reuse_ips" required:"false"`
+ // unassigned floating ips in the project before allocating a new one. Note
+ // that it is not possible to safely do this concurrently, so if you are
+ // running multiple openstack builds concurrently, or if other processes are
+ // assigning and using floating IPs in the same openstack project while packer
+ // is running, you should not set this to true. Defaults to false.
+ ReuseIPs bool `mapstructure:"reuse_ips" required:"false"`
// A list of security groups by name to
- // add to this instance.
- SecurityGroups []string `mapstructure:"security_groups" required:"false"`
+ // add to this instance.
+ SecurityGroups []string `mapstructure:"security_groups" required:"false"`
// A list of networks by UUID to attach to
- // this instance.
- Networks []string `mapstructure:"networks" required:"false"`
+ // this instance.
+ Networks []string `mapstructure:"networks" required:"false"`
// A list of ports by UUID to attach to this
- // instance.
- Ports []string `mapstructure:"ports" required:"false"`
+ // instance.
+ Ports []string `mapstructure:"ports" required:"false"`
// User data to apply when launching the instance. Note
- // that you need to be careful about escaping characters due to the templates
- // being JSON. It is often more convenient to use user_data_file, instead.
- // Packer will not automatically wait for a user script to finish before
- // shutting down the instance this must be handled in a provisioner.
- UserData string `mapstructure:"user_data" required:"false"`
+ // that you need to be careful about escaping characters due to the templates
+ // being JSON. It is often more convenient to use user_data_file, instead.
+ // Packer will not automatically wait for a user script to finish before
+ // shutting down the instance this must be handled in a provisioner.
+ UserData string `mapstructure:"user_data" required:"false"`
// Path to a file that will be used for the user
- // data when launching the instance.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ // data when launching the instance.
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// Name that is applied to the server instance
- // created by Packer. If this isn't specified, the default is same as
- // image_name.
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ // created by Packer. If this isn't specified, the default is same as
+ // image_name.
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// Metadata that is
- // applied to the server instance created by Packer. Also called server
- // properties in some documentation. The strings have a max size of 255 bytes
- // each.
- InstanceMetadata map[string]string `mapstructure:"instance_metadata" required:"false"`
+ // applied to the server instance created by Packer. Also called server
+ // properties in some documentation. The strings have a max size of 255 bytes
+ // each.
+ InstanceMetadata map[string]string `mapstructure:"instance_metadata" required:"false"`
// Whether to force the OpenStack instance to be
- // forcefully deleted. This is useful for environments that have
- // reclaim / soft deletion enabled. By default this is false.
- ForceDelete bool `mapstructure:"force_delete" required:"false"`
+ // forcefully deleted. This is useful for environments that have
+ // reclaim / soft deletion enabled. By default this is false.
+ ForceDelete bool `mapstructure:"force_delete" required:"false"`
// Whether or not nova should use ConfigDrive for
- // cloud-init metadata.
+ // cloud-init metadata.
ConfigDrive bool `mapstructure:"config_drive" required:"false"`
// Deprecated use floating_ip_network
- // instead.
+ // instead.
FloatingIPPool string `mapstructure:"floating_ip_pool" required:"false"`
// Use Block Storage service volume for
- // the instance root volume instead of Compute service local volume (default).
- UseBlockStorageVolume bool `mapstructure:"use_blockstorage_volume" required:"false"`
+ // the instance root volume instead of Compute service local volume (default).
+ UseBlockStorageVolume bool `mapstructure:"use_blockstorage_volume" required:"false"`
// Name of the Block Storage service volume. If this
- // isn't specified, random string will be used.
- VolumeName string `mapstructure:"volume_name" required:"false"`
+ // isn't specified, random string will be used.
+ VolumeName string `mapstructure:"volume_name" required:"false"`
// Type of the Block Storage service volume. If this
- // isn't specified, the default enforced by your OpenStack cluster will be
- // used.
- VolumeType string `mapstructure:"volume_type" required:"false"`
+ // isn't specified, the default enforced by your OpenStack cluster will be
+ // used.
+ VolumeType string `mapstructure:"volume_type" required:"false"`
// Size of the Block Storage service volume in GB. If
- // this isn't specified, it is set to source image min disk value (if set) or
- // calculated from the source image bytes size. Note that in some cases this
- // needs to be specified, if use_blockstorage_volume is true.
- VolumeSize int `mapstructure:"volume_size" required:"false"`
+ // this isn't specified, it is set to source image min disk value (if set) or
+ // calculated from the source image bytes size. Note that in some cases this
+ // needs to be specified, if use_blockstorage_volume is true.
+ VolumeSize int `mapstructure:"volume_size" required:"false"`
// Availability zone of the Block
- // Storage service volume. If omitted, Compute instance availability zone will
- // be used. If both of Compute instance and Block Storage volume availability
- // zones aren't specified, the default enforced by your OpenStack cluster will
- // be used.
+ // Storage service volume. If omitted, Compute instance availability zone will
+ // be used. If both of Compute instance and Block Storage volume availability
+ // zones aren't specified, the default enforced by your OpenStack cluster will
+ // be used.
VolumeAvailabilityZone string `mapstructure:"volume_availability_zone" required:"false"`
// Not really used, but here for BC
OpenstackProvider string `mapstructure:"openstack_provider"`
// Deprecated use floating_ip or
- // floating_ip_pool instead.
- UseFloatingIp bool `mapstructure:"use_floating_ip" required:"false"`
+ // floating_ip_pool instead.
+ UseFloatingIp bool `mapstructure:"use_floating_ip" required:"false"`
sourceImageOpts images.ListOpts
}
type ImageFilter struct {
// filters used to select a source_image.
- // NOTE: This will fail unless exactly one image is returned, or
- // most_recent is set to true. Of the filters described in
- // ImageService, the
- // following are valid:
- Filters ImageFilterOptions `mapstructure:"filters" required:"false"`
+ // NOTE: This will fail unless exactly one image is returned, or
+ // most_recent is set to true. Of the filters described in
+ // ImageService, the
+ // following are valid:
+ Filters ImageFilterOptions `mapstructure:"filters" required:"false"`
// Selects the newest created image when true.
- // This is most useful for selecting a daily distro build.
- MostRecent bool `mapstructure:"most_recent" required:"false"`
+ // This is most useful for selecting a daily distro build.
+ MostRecent bool `mapstructure:"most_recent" required:"false"`
}
type ImageFilterOptions struct {
diff --git a/builder/parallels/common/hw_config.go b/builder/parallels/common/hw_config.go
index 3c0e0aeec..f803450c8 100644
--- a/builder/parallels/common/hw_config.go
+++ b/builder/parallels/common/hw_config.go
@@ -10,17 +10,17 @@ import (
type HWConfig struct {
// The number of cpus to use for building the VM.
- // Defaults to 1.
- CpuCount int `mapstructure:"cpus" required:"false"`
+ // Defaults to 1.
+ CpuCount int `mapstructure:"cpus" required:"false"`
// The amount of memory to use for building the VM in
- // megabytes. Defaults to 512 megabytes.
+ // megabytes. Defaults to 512 megabytes.
MemorySize int `mapstructure:"memory" required:"false"`
// Specifies whether to enable the sound device when
- // building the VM. Defaults to false.
+ // building the VM. Defaults to false.
Sound bool `mapstructure:"sound" required:"false"`
// Specifies whether to enable the USB bus when building
- // the VM. Defaults to false.
- USB bool `mapstructure:"usb" required:"false"`
+ // the VM. Defaults to false.
+ USB bool `mapstructure:"usb" required:"false"`
}
func (c *HWConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/parallels/common/output_config.go b/builder/parallels/common/output_config.go
index c773cad01..8508c427e 100644
--- a/builder/parallels/common/output_config.go
+++ b/builder/parallels/common/output_config.go
@@ -14,11 +14,11 @@ import (
// OutputConfig contains the configuration for builder's output.
type OutputConfig struct {
// This is the path to the directory where the
- // resulting virtual machine will be created. This may be relative or absolute.
- // If relative, the path is relative to the working directory when packer
- // is executed. This directory must not exist or be empty prior to running
- // the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
- // name of the build.
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ // name of the build.
OutputDir string `mapstructure:"output_directory" required:"false"`
}
diff --git a/builder/parallels/common/prlctl_config.go b/builder/parallels/common/prlctl_config.go
index 753de4c3e..d22217651 100644
--- a/builder/parallels/common/prlctl_config.go
+++ b/builder/parallels/common/prlctl_config.go
@@ -10,15 +10,15 @@ import (
// before the VM start.
type PrlctlConfig struct {
// Custom prlctl commands to execute
- // in order to further customize the virtual machine being created. The value
- // of this is an array of commands to execute. The commands are executed in the
- // order defined in the template. For each command, the command is defined
- // itself as an array of strings, where each string represents a single
- // argument on the command-line to prlctl (but excluding prlctl itself).
- // Each arg is treated as a configuration
- // template, where the Name
- // variable is replaced with the VM name. More details on how to use prlctl
- // are below.
+ // in order to further customize the virtual machine being created. The value
+ // of this is an array of commands to execute. The commands are executed in the
+ // order defined in the template. For each command, the command is defined
+ // itself as an array of strings, where each string represents a single
+ // argument on the command-line to prlctl (but excluding prlctl itself).
+ // Each arg is treated as a configuration
+ // template, where the Name
+ // variable is replaced with the VM name. More details on how to use prlctl
+ // are below.
Prlctl [][]string `mapstructure:"prlctl" required:"false"`
}
diff --git a/builder/parallels/common/prlctl_post_config.go b/builder/parallels/common/prlctl_post_config.go
index d1763800d..6b8dc43cc 100644
--- a/builder/parallels/common/prlctl_post_config.go
+++ b/builder/parallels/common/prlctl_post_config.go
@@ -10,8 +10,8 @@ import (
// in the end of artifact build.
type PrlctlPostConfig struct {
// Identical to prlctl, except
- // that it is run after the virtual machine is shutdown, and before the virtual
- // machine is exported.
+ // that it is run after the virtual machine is shutdown, and before the virtual
+ // machine is exported.
PrlctlPost [][]string `mapstructure:"prlctl_post" required:"false"`
}
diff --git a/builder/parallels/common/prlctl_version_config.go b/builder/parallels/common/prlctl_version_config.go
index 6a7f97ef3..fd6a5e54e 100644
--- a/builder/parallels/common/prlctl_version_config.go
+++ b/builder/parallels/common/prlctl_version_config.go
@@ -9,10 +9,10 @@ import (
// PrlctlVersionConfig contains the configuration for `prlctl` version.
type PrlctlVersionConfig struct {
// The path within the virtual machine to
- // upload a file that contains the prlctl version that was used to create
- // the machine. This information can be useful for provisioning. By default
- // this is ".prlctl_version", which will generally upload it into the
- // home directory.
+ // upload a file that contains the prlctl version that was used to create
+ // the machine. This information can be useful for provisioning. By default
+ // this is ".prlctl_version", which will generally upload it into the
+ // home directory.
PrlctlVersionFile string `mapstructure:"prlctl_version_file" required:"false"`
}
diff --git a/builder/parallels/common/shutdown_config.go b/builder/parallels/common/shutdown_config.go
index 658f90018..3bf491460 100644
--- a/builder/parallels/common/shutdown_config.go
+++ b/builder/parallels/common/shutdown_config.go
@@ -12,13 +12,13 @@ import (
// ShutdownConfig contains the configuration for VM shutdown.
type ShutdownConfig struct {
// The command to use to gracefully shut down the
- // machine once all the provisioning is done. By default this is an empty
- // string, which tells Packer to just forcefully shut down the machine.
- ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
// The amount of time to wait after executing the
- // shutdown_command for the virtual machine to actually shut down. If it
- // doesn't shut down in this time, it is an error. By default, the timeout is
- // "5m", or five minutes.
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // "5m", or five minutes.
RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
diff --git a/builder/parallels/common/tools_config.go b/builder/parallels/common/tools_config.go
index 0b746b939..357137ee4 100644
--- a/builder/parallels/common/tools_config.go
+++ b/builder/parallels/common/tools_config.go
@@ -20,25 +20,25 @@ const (
// ToolsConfig contains the builder configuration related to Parallels Tools.
type ToolsConfig struct {
// The flavor of the Parallels Tools ISO to
- // install into the VM. Valid values are "win", "lin", "mac", "os2"
- // and "other". This can be omitted only if parallels_tools_mode
- // is "disable".
- ParallelsToolsFlavor string `mapstructure:"parallels_tools_flavor" required:"true"`
+ // install into the VM. Valid values are "win", "lin", "mac", "os2"
+ // and "other". This can be omitted only if parallels_tools_mode
+ // is "disable".
+ ParallelsToolsFlavor string `mapstructure:"parallels_tools_flavor" required:"true"`
// The path in the virtual machine to
- // upload Parallels Tools. This only takes effect if parallels_tools_mode
- // is "upload". This is a configuration
- // template that has a single
- // valid variable: Flavor, which will be the value of
- // parallels_tools_flavor. By default this is "prl-tools-{{.Flavor}}.iso"
- // which should upload into the login directory of the user.
+ // upload Parallels Tools. This only takes effect if parallels_tools_mode
+ // is "upload". This is a configuration
+ // template that has a single
+ // valid variable: Flavor, which will be the value of
+ // parallels_tools_flavor. By default this is "prl-tools-{{.Flavor}}.iso"
+ // which should upload into the login directory of the user.
ParallelsToolsGuestPath string `mapstructure:"parallels_tools_guest_path" required:"false"`
// The method by which Parallels Tools are
- // made available to the guest for installation. Valid options are "upload",
- // "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
- // be attached as a CD device to the virtual machine. If the mode is "upload"
- // the Parallels Tools ISO will be uploaded to the path specified by
- // parallels_tools_guest_path. The default value is "upload".
- ParallelsToolsMode string `mapstructure:"parallels_tools_mode" required:"false"`
+ // made available to the guest for installation. Valid options are "upload",
+ // "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
+ // be attached as a CD device to the virtual machine. If the mode is "upload"
+ // the Parallels Tools ISO will be uploaded to the path specified by
+ // parallels_tools_guest_path. The default value is "upload".
+ ParallelsToolsMode string `mapstructure:"parallels_tools_mode" required:"false"`
}
// Prepare validates & sets up configuration options related to Parallels Tools.
diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go
index a2c61f1ba..f55314352 100644
--- a/builder/parallels/iso/builder.go
+++ b/builder/parallels/iso/builder.go
@@ -39,43 +39,43 @@ type Config struct {
parallelscommon.SSHConfig `mapstructure:",squash"`
parallelscommon.ToolsConfig `mapstructure:",squash"`
// The size, in megabytes, of the hard disk to create
- // for the VM. By default, this is 40000 (about 40 GB).
- DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // for the VM. By default, this is 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
// The type for image file based virtual disk drives,
- // defaults to expand. Valid options are expand (expanding disk) that the
- // image file is small initially and grows in size as you add data to it, and
- // plain (plain disk) that the image file has a fixed size from the moment it
- // is created (i.e the space is allocated for the full drive). Plain disks
- // perform faster than expanding disks. skip_compaction will be set to true
- // automatically for plain disks.
- DiskType string `mapstructure:"disk_type" required:"false"`
+ // defaults to expand. Valid options are expand (expanding disk) that the
+ // image file is small initially and grows in size as you add data to it, and
+ // plain (plain disk) that the image file has a fixed size from the moment it
+ // is created (i.e the space is allocated for the full drive). Plain disks
+ // perform faster than expanding disks. skip_compaction will be set to true
+ // automatically for plain disks.
+ DiskType string `mapstructure:"disk_type" required:"false"`
// The guest OS type being installed. By default
- // this is "other", but you can get dramatic performance improvements by
- // setting this to the proper value. To view all available values for this run
- // prlctl create x --distribution list. Setting the correct value hints to
- // Parallels Desktop how to optimize the virtual hardware to work best with
- // that operating system.
- GuestOSType string `mapstructure:"guest_os_type" required:"false"`
+ // this is "other", but you can get dramatic performance improvements by
+ // setting this to the proper value. To view all available values for this run
+ // prlctl create x --distribution list. Setting the correct value hints to
+ // Parallels Desktop how to optimize the virtual hardware to work best with
+ // that operating system.
+ GuestOSType string `mapstructure:"guest_os_type" required:"false"`
// The type of controller that the hard
- // drives are attached to, defaults to "sata". Valid options are "sata", "ide",
- // and "scsi".
- HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
+ // drives are attached to, defaults to "sata". Valid options are "sata", "ide",
+ // and "scsi".
+ HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
// A list of which interfaces on the
- // host should be searched for a IP address. The first IP address found on one
- // of these will be used as {{ .HTTPIP }} in the boot_command. Defaults to
- // ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
- // "ppp0", "ppp1", "ppp2"].
- HostInterfaces []string `mapstructure:"host_interfaces" required:"false"`
+ // host should be searched for a IP address. The first IP address found on one
+ // of these will be used as {{ .HTTPIP }} in the boot_command. Defaults to
+ // ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
+ // "ppp0", "ppp1", "ppp2"].
+ HostInterfaces []string `mapstructure:"host_interfaces" required:"false"`
// Virtual disk image is compacted at the end of
- // the build process using prl_disk_tool utility (except for the case that
- // disk_type is set to plain). In certain rare cases, this might corrupt
- // the resulting disk image. If you find this to be the case, you can disable
- // compaction using this configuration value.
- SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // the build process using prl_disk_tool utility (except for the case that
+ // disk_type is set to plain). In certain rare cases, this might corrupt
+ // the resulting disk image. If you find this to be the case, you can disable
+ // compaction using this configuration value.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// This is the name of the PVM directory for the new
- // virtual machine, without the file extension. By default this is
- // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // virtual machine, without the file extension. By default this is
+ // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go
index 8c11b86d7..46ba559af 100644
--- a/builder/parallels/pvm/config.go
+++ b/builder/parallels/pvm/config.go
@@ -27,22 +27,22 @@ type Config struct {
bootcommand.BootConfig `mapstructure:",squash"`
parallelscommon.ToolsConfig `mapstructure:",squash"`
// The path to a PVM directory that acts as the source
- // of this build.
- SourcePath string `mapstructure:"source_path" required:"true"`
+ // of this build.
+ SourcePath string `mapstructure:"source_path" required:"true"`
// Virtual disk image is compacted at the end of
- // the build process using prl_disk_tool utility (except for the case that
- // disk_type is set to plain). In certain rare cases, this might corrupt
- // the resulting disk image. If you find this to be the case, you can disable
- // compaction using this configuration value.
- SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // the build process using prl_disk_tool utility (except for the case that
+ // disk_type is set to plain). In certain rare cases, this might corrupt
+ // the resulting disk image. If you find this to be the case, you can disable
+ // compaction using this configuration value.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// This is the name of the PVM directory for the new
- // virtual machine, without the file extension. By default this is
- // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // virtual machine, without the file extension. By default this is
+ // "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
// If this is "false" the MAC address of the first
- // NIC will reused when imported else a new MAC address will be generated
- // by Parallels. Defaults to "false".
- ReassignMAC bool `mapstructure:"reassign_mac" required:"false"`
+ // NIC will reused when imported else a new MAC address will be generated
+ // by Parallels. Defaults to "false".
+ ReassignMAC bool `mapstructure:"reassign_mac" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index 97fe3f94e..7473b071d 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -98,133 +98,133 @@ type Config struct {
Comm communicator.Config `mapstructure:",squash"`
common.FloppyConfig `mapstructure:",squash"`
// Use iso from provided url. Qemu must support
- // curl block device. This defaults to false.
- ISOSkipCache bool `mapstructure:"iso_skip_cache" required:"false"`
+ // curl block device. This defaults to false.
+ ISOSkipCache bool `mapstructure:"iso_skip_cache" required:"false"`
// The accelerator type to use when running the VM.
- // This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
- // software must have already been installed on your build machine to use the
- // accelerator you specified. When no accelerator is specified, Packer will try
- // to use kvm if it is available but will default to tcg otherwise.
- Accelerator string `mapstructure:"accelerator" required:"false"`
+ // This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
+ // software must have already been installed on your build machine to use the
+ // accelerator you specified. When no accelerator is specified, Packer will try
+ // to use kvm if it is available but will default to tcg otherwise.
+ Accelerator string `mapstructure:"accelerator" required:"false"`
// The number of cpus to use when building the VM.
- // The default is 1 CPU.
- CpuCount int `mapstructure:"cpus" required:"false"`
+ // The default is 1 CPU.
+ CpuCount int `mapstructure:"cpus" required:"false"`
// The interface to use for the disk. Allowed
- // values include any of ide, scsi, virtio or virtio-scsi*. Note
- // also that any boot commands or kickstart type scripts must have proper
- // adjustments for resulting device names. The Qemu builder uses virtio by
- // default.
- DiskInterface string `mapstructure:"disk_interface" required:"false"`
+ // values include any of ide, scsi, virtio or virtio-scsi*. Note
+ // also that any boot commands or kickstart type scripts must have proper
+ // adjustments for resulting device names. The Qemu builder uses virtio by
+ // default.
+ DiskInterface string `mapstructure:"disk_interface" required:"false"`
// The size, in megabytes, of the hard disk to create
- // for the VM. By default, this is 40960 (40 GB).
- DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // for the VM. By default, this is 40960 (40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
// The cache mode to use for disk. Allowed values
- // include any of writethrough, writeback, none, unsafe
- // or directsync. By default, this is set to writeback.
- DiskCache string `mapstructure:"disk_cache" required:"false"`
+ // include any of writethrough, writeback, none, unsafe
+ // or directsync. By default, this is set to writeback.
+ DiskCache string `mapstructure:"disk_cache" required:"false"`
// The discard mode to use for disk. Allowed values
- // include any of unmap or ignore. By default, this is set to ignore.
- DiskDiscard string `mapstructure:"disk_discard" required:"false"`
+ // include any of unmap or ignore. By default, this is set to ignore.
+ DiskDiscard string `mapstructure:"disk_discard" required:"false"`
// The detect-zeroes mode to use for disk.
- // Allowed values include any of unmap, on or off. Defaults to off.
- // When the value is "off" we don't set the flag in the qemu command, so that
- // Packer still works with old versions of QEMU that don't have this option.
- DetectZeroes string `mapstructure:"disk_detect_zeroes" required:"false"`
+ // Allowed values include any of unmap, on or off. Defaults to off.
+ // When the value is "off" we don't set the flag in the qemu command, so that
+ // Packer still works with old versions of QEMU that don't have this option.
+ DetectZeroes string `mapstructure:"disk_detect_zeroes" required:"false"`
// Packer compacts the QCOW2 image using
- // qemu-img convert. Set this option to true to disable compacting.
- // Defaults to false.
- SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // qemu-img convert. Set this option to true to disable compacting.
+ // Defaults to false.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// Apply compression to the QCOW2 disk file
- // using qemu-img convert. Defaults to false.
- DiskCompression bool `mapstructure:"disk_compression" required:"false"`
+ // using qemu-img convert. Defaults to false.
+ DiskCompression bool `mapstructure:"disk_compression" required:"false"`
// Either qcow2 or raw, this specifies the output
- // format of the virtual machine image. This defaults to qcow2.
- Format string `mapstructure:"format" required:"false"`
+ // format of the virtual machine image. This defaults to qcow2.
+ Format string `mapstructure:"format" required:"false"`
// Packer defaults to building QEMU virtual machines by
- // launching a GUI that shows the console of the machine being built. When this
- // value is set to true, the machine will start without a console.
- Headless bool `mapstructure:"headless" required:"false"`
+ // launching a GUI that shows the console of the machine being built. When this
+ // value is set to true, the machine will start without a console.
+ Headless bool `mapstructure:"headless" required:"false"`
// Packer defaults to building from an ISO file, this
- // parameter controls whether the ISO URL supplied is actually a bootable
- // QEMU image. When this value is set to true, the machine will either clone
- // the source or use it as a backing file (if use_backing_file is true);
- // then, it will resize the image according to disk_size and boot it.
- DiskImage bool `mapstructure:"disk_image" required:"false"`
+ // parameter controls whether the ISO URL supplied is actually a bootable
+ // QEMU image. When this value is set to true, the machine will either clone
+ // the source or use it as a backing file (if use_backing_file is true);
+ // then, it will resize the image according to disk_size and boot it.
+ DiskImage bool `mapstructure:"disk_image" required:"false"`
// Only applicable when disk_image is true
- // and format is qcow2, set this option to true to create a new QCOW2
- // file that uses the file located at iso_url as a backing file. The new file
- // will only contain blocks that have changed compared to the backing file, so
- // enabling this option can significantly reduce disk usage.
- UseBackingFile bool `mapstructure:"use_backing_file" required:"false"`
+ // and format is qcow2, set this option to true to create a new QCOW2
+ // file that uses the file located at iso_url as a backing file. The new file
+ // will only contain blocks that have changed compared to the backing file, so
+ // enabling this option can significantly reduce disk usage.
+ UseBackingFile bool `mapstructure:"use_backing_file" required:"false"`
// The type of machine emulation to use. Run your
- // qemu binary with the flags -machine help to list available types for
- // your system. This defaults to pc.
- MachineType string `mapstructure:"machine_type" required:"false"`
+ // qemu binary with the flags -machine help to list available types for
+ // your system. This defaults to pc.
+ MachineType string `mapstructure:"machine_type" required:"false"`
// The amount of memory to use when building the VM
- // in megabytes. This defaults to 512 megabytes.
- MemorySize int `mapstructure:"memory" required:"false"`
+ // in megabytes. This defaults to 512 megabytes.
+ MemorySize int `mapstructure:"memory" required:"false"`
// The driver to use for the network interface. Allowed
- // values ne2k_pci, i82551, i82557b, i82559er, rtl8139, e1000,
- // pcnet, virtio, virtio-net, virtio-net-pci, usb-net, i82559a,
- // i82559b, i82559c, i82550, i82562, i82557a, i82557c, i82801,
- // vmxnet3, i82558a or i82558b. The Qemu builder uses virtio-net by
- // default.
- NetDevice string `mapstructure:"net_device" required:"false"`
+ // values ne2k_pci, i82551, i82557b, i82559er, rtl8139, e1000,
+ // pcnet, virtio, virtio-net, virtio-net-pci, usb-net, i82559a,
+ // i82559b, i82559c, i82550, i82562, i82557a, i82557c, i82801,
+ // vmxnet3, i82558a or i82558b. The Qemu builder uses virtio-net by
+ // default.
+ NetDevice string `mapstructure:"net_device" required:"false"`
// This is the path to the directory where the
- // resulting virtual machine will be created. This may be relative or absolute.
- // If relative, the path is relative to the working directory when packer
- // is executed. This directory must not exist or be empty prior to running
- // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
- // name of the build.
- OutputDir string `mapstructure:"output_directory" required:"false"`
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
+ OutputDir string `mapstructure:"output_directory" required:"false"`
// Allows complete control over the
- // qemu command line (though not, at this time, qemu-img). Each array of
- // strings makes up a command line switch that overrides matching default
- // switch/value pairs. Any value specified as an empty string is ignored. All
- // values after the switch are concatenated with no separator.
- QemuArgs [][]string `mapstructure:"qemuargs" required:"false"`
+ // qemu command line (though not, at this time, qemu-img). Each array of
+ // strings makes up a command line switch that overrides matching default
+ // switch/value pairs. Any value specified as an empty string is ignored. All
+ // values after the switch are concatenated with no separator.
+ QemuArgs [][]string `mapstructure:"qemuargs" required:"false"`
// The name of the Qemu binary to look for. This
- // defaults to qemu-system-x86_64, but may need to be changed for
- // some platforms. For example qemu-kvm, or qemu-system-i386 may be a
- // better choice for some systems.
- QemuBinary string `mapstructure:"qemu_binary" required:"false"`
+ // defaults to qemu-system-x86_64, but may need to be changed for
+ // some platforms. For example qemu-kvm, or qemu-system-i386 may be a
+ // better choice for some systems.
+ QemuBinary string `mapstructure:"qemu_binary" required:"false"`
// The command to use to gracefully shut down the
- // machine once all the provisioning is done. By default this is an empty
- // string, which tells Packer to just forcefully shut down the machine unless a
- // shutdown command takes place inside script so this may safely be omitted. It
- // is important to add a shutdown_command. By default Packer halts the virtual
- // machine and the file system may not be sync'd. Thus, changes made in a
- // provisioner might not be saved. If one or more scripts require a reboot it is
- // suggested to leave this blank since reboots may fail and specify the final
- // shutdown command in your last script.
- ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine unless a
+ // shutdown command takes place inside script so this may safely be omitted. It
+ // is important to add a shutdown_command. By default Packer halts the virtual
+ // machine and the file system may not be sync'd. Thus, changes made in a
+ // provisioner might not be saved. If one or more scripts require a reboot it is
+ // suggested to leave this blank since reboots may fail and specify the final
+ // shutdown command in your last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
// The minimum and
- // maximum port to use for the SSH port on the host machine which is forwarded
- // to the SSH port on the guest machine. Because Packer often runs in parallel,
- // Packer will choose a randomly available port in this range to use as the
- // host port. By default this is 2222 to 4444.
- SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
- SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
+ // maximum port to use for the SSH port on the host machine which is forwarded
+ // to the SSH port on the guest machine. Because Packer often runs in parallel,
+ // Packer will choose a randomly available port in this range to use as the
+ // host port. By default this is 2222 to 4444.
+ SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
+ SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
// If true, do not pass a -display option
- // to qemu, allowing it to choose the default. This may be needed when running
- // under macOS, and getting errors about sdl not being available.
- UseDefaultDisplay bool `mapstructure:"use_default_display" required:"false"`
+ // to qemu, allowing it to choose the default. This may be needed when running
+ // under macOS, and getting errors about sdl not being available.
+ UseDefaultDisplay bool `mapstructure:"use_default_display" required:"false"`
// The IP address that should be
- // binded to for VNC. By default packer will use 127.0.0.1 for this. If you
- // wish to bind to all interfaces use 0.0.0.0.
- VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
+	// bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
+ VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
// The minimum and maximum port
- // to use for VNC access to the virtual machine. The builder uses VNC to type
- // the initial boot_command. Because Packer generally runs in parallel,
- // Packer uses a randomly chosen port in this range that appears available. By
- // default this is 5900 to 6000. The minimum and maximum ports are inclusive.
- VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
- VNCPortMax int `mapstructure:"vnc_port_max"`
+ // to use for VNC access to the virtual machine. The builder uses VNC to type
+ // the initial boot_command. Because Packer generally runs in parallel,
+ // Packer uses a randomly chosen port in this range that appears available. By
+ // default this is 5900 to 6000. The minimum and maximum ports are inclusive.
+ VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
+ VNCPortMax int `mapstructure:"vnc_port_max"`
// This is the name of the image (QCOW2 or IMG) file for
- // the new virtual machine. By default this is packer-BUILDNAME, where
- // "BUILDNAME" is the name of the build. Currently, no file extension will be
- // used unless it is specified in this option.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // the new virtual machine. By default this is packer-BUILDNAME, where
+ // "BUILDNAME" is the name of the build. Currently, no file extension will be
+ // used unless it is specified in this option.
+ VMName string `mapstructure:"vm_name" required:"false"`
// These are deprecated, but we keep them around for BC
// TODO(@mitchellh): remove
@@ -233,9 +233,9 @@ type Config struct {
// TODO(mitchellh): deprecate
RunOnce bool `mapstructure:"run_once"`
// The amount of time to wait after executing the
- // shutdown_command for the virtual machine to actually shut down. If it
- // doesn't shut down in this time, it is an error. By default, the timeout is
- // 5m or five minutes.
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
shutdownTimeout time.Duration ``
diff --git a/builder/scaleway/config.go b/builder/scaleway/config.go
index f3909f7cb..1455369d2 100644
--- a/builder/scaleway/config.go
+++ b/builder/scaleway/config.go
@@ -22,47 +22,47 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
// The token to use to authenticate with your account.
- // It can also be specified via environment variable SCALEWAY_API_TOKEN. You
- // can see and generate tokens in the "Credentials"
- // section of the control panel.
- Token string `mapstructure:"api_token" required:"true"`
+ // It can also be specified via environment variable SCALEWAY_API_TOKEN. You
+ // can see and generate tokens in the "Credentials"
+ // section of the control panel.
+ Token string `mapstructure:"api_token" required:"true"`
// The organization id to use to identify your
- // organization. It can also be specified via environment variable
- // SCALEWAY_ORGANIZATION. Your organization id is available in the
- // "Account" section of the
- // control panel.
- // Previously named: api_access_key with environment variable: SCALEWAY_API_ACCESS_KEY
+ // organization. It can also be specified via environment variable
+ // SCALEWAY_ORGANIZATION. Your organization id is available in the
+ // "Account" section of the
+ // control panel.
+ // Previously named: api_access_key with environment variable: SCALEWAY_API_ACCESS_KEY
Organization string `mapstructure:"organization_id" required:"true"`
// The name of the region to launch the server in (par1
- // or ams1). Consequently, this is the region where the snapshot will be
- // available.
- Region string `mapstructure:"region" required:"true"`
+ // or ams1). Consequently, this is the region where the snapshot will be
+ // available.
+ Region string `mapstructure:"region" required:"true"`
// The UUID of the base image to use. This is the image
- // that will be used to launch a new server and provision it. See
- // the images list
- // get the complete list of the accepted image UUID.
- Image string `mapstructure:"image" required:"true"`
+ // that will be used to launch a new server and provision it. See
+ // the images list
+ // get the complete list of the accepted image UUID.
+ Image string `mapstructure:"image" required:"true"`
// The name of the server commercial type:
- // ARM64-128GB, ARM64-16GB, ARM64-2GB, ARM64-32GB, ARM64-4GB,
- // ARM64-64GB, ARM64-8GB, C1, C2L, C2M, C2S, START1-L,
- // START1-M, START1-S, START1-XS, X64-120GB, X64-15GB, X64-30GB,
- // X64-60GB
+ // ARM64-128GB, ARM64-16GB, ARM64-2GB, ARM64-32GB, ARM64-4GB,
+ // ARM64-64GB, ARM64-8GB, C1, C2L, C2M, C2S, START1-L,
+ // START1-M, START1-S, START1-XS, X64-120GB, X64-15GB, X64-30GB,
+ // X64-60GB
CommercialType string `mapstructure:"commercial_type" required:"true"`
// The name of the resulting snapshot that will
- // appear in your account. Default packer-TIMESTAMP
+ // appear in your account. Default packer-TIMESTAMP
SnapshotName string `mapstructure:"snapshot_name" required:"false"`
// The name of the resulting image that will appear in
- // your account. Default packer-TIMESTAMP
- ImageName string `mapstructure:"image_name" required:"false"`
+ // your account. Default packer-TIMESTAMP
+ ImageName string `mapstructure:"image_name" required:"false"`
// The name assigned to the server. Default
- // packer-UUID
- ServerName string `mapstructure:"server_name" required:"false"`
+ // packer-UUID
+ ServerName string `mapstructure:"server_name" required:"false"`
// The id of an existing bootscript to use when
- // booting the server.
- Bootscript string `mapstructure:"bootscript" required:"false"`
+ // booting the server.
+ Bootscript string `mapstructure:"bootscript" required:"false"`
// The type of boot, can be either local or
- // bootscript, Default bootscript
- BootType string `mapstructure:"boottype" required:"false"`
+ // bootscript, Default bootscript
+ BootType string `mapstructure:"boottype" required:"false"`
UserAgent string
ctx interpolate.Context
diff --git a/builder/tencentcloud/cvm/access_config.go b/builder/tencentcloud/cvm/access_config.go
index 17ff5f74b..cb4790131 100644
--- a/builder/tencentcloud/cvm/access_config.go
+++ b/builder/tencentcloud/cvm/access_config.go
@@ -47,21 +47,21 @@ var ValidRegions = []Region{
type TencentCloudAccessConfig struct {
// Tencentcloud secret id. You should set it directly,
- // or set the TENCENTCLOUD_ACCESS_KEY environment variable.
- SecretId string `mapstructure:"secret_id" required:"true"`
+ // or set the TENCENTCLOUD_ACCESS_KEY environment variable.
+ SecretId string `mapstructure:"secret_id" required:"true"`
// Tencentcloud secret key. You should set it directly,
- // or set the TENCENTCLOUD_SECRET_KEY environment variable.
- SecretKey string `mapstructure:"secret_key" required:"true"`
+ // or set the TENCENTCLOUD_SECRET_KEY environment variable.
+ SecretKey string `mapstructure:"secret_key" required:"true"`
// The region where your cvm will be launch. You should
- // reference Region and Zone
- // for parameter taking.
- Region string `mapstructure:"region" required:"true"`
+ // reference Region and Zone
+ // for parameter taking.
+ Region string `mapstructure:"region" required:"true"`
// The zone where your cvm will be launch. You should
- // reference Region and Zone
- // for parameter taking.
- Zone string `mapstructure:"zone" required:"true"`
+ // reference Region and Zone
+ // for parameter taking.
+ Zone string `mapstructure:"zone" required:"true"`
// Do not check region and zone when validate.
- SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
}
func (cf *TencentCloudAccessConfig) Client() (*cvm.Client, *vpc.Client, error) {
diff --git a/builder/tencentcloud/cvm/image_config.go b/builder/tencentcloud/cvm/image_config.go
index 57eb18490..ccfc83f37 100644
--- a/builder/tencentcloud/cvm/image_config.go
+++ b/builder/tencentcloud/cvm/image_config.go
@@ -11,28 +11,28 @@ import (
type TencentCloudImageConfig struct {
// The name you want to create your customize image,
- // it should be composed of no more than 20 characters, of letters, numbers
- // or minus sign.
- ImageName string `mapstructure:"image_name" required:"true"`
+ // it should be composed of no more than 20 characters, of letters, numbers
+ // or minus sign.
+ ImageName string `mapstructure:"image_name" required:"true"`
// Image description.
- ImageDescription string `mapstructure:"image_description" required:"false"`
+ ImageDescription string `mapstructure:"image_description" required:"false"`
// Whether shutdown cvm to create Image. Default value is
- // false.
- Reboot bool `mapstructure:"reboot" required:"false"`
+ // false.
+ Reboot bool `mapstructure:"reboot" required:"false"`
// Whether to force power off cvm when create image.
- // Default value is false.
- ForcePoweroff bool `mapstructure:"force_poweroff" required:"false"`
+ // Default value is false.
+ ForcePoweroff bool `mapstructure:"force_poweroff" required:"false"`
// Whether enable Sysprep during creating windows image.
- Sysprep bool `mapstructure:"sysprep" required:"false"`
- ImageForceDelete bool `mapstructure:"image_force_delete"`
+ Sysprep bool `mapstructure:"sysprep" required:"false"`
+ ImageForceDelete bool `mapstructure:"image_force_delete"`
// regions that will be copied to after
- // your image created.
- ImageCopyRegions []string `mapstructure:"image_copy_regions" required:"false"`
+ // your image created.
+ ImageCopyRegions []string `mapstructure:"image_copy_regions" required:"false"`
// accounts that will be shared to
- // after your image created.
+ // after your image created.
ImageShareAccounts []string `mapstructure:"image_share_accounts" required:"false"`
// Do not check region and zone when validate.
- SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
+ SkipValidation bool `mapstructure:"skip_region_validation" required:"false"`
}
func (cf *TencentCloudImageConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/tencentcloud/cvm/run_config.go b/builder/tencentcloud/cvm/run_config.go
index 21fae66fe..91b06afbb 100644
--- a/builder/tencentcloud/cvm/run_config.go
+++ b/builder/tencentcloud/cvm/run_config.go
@@ -14,53 +14,53 @@ import (
type TencentCloudRunConfig struct {
// Whether allocate public ip to your cvm.
- // Default value is false.
- AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
+ // Default value is false.
+ AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
// The base image id of Image you want to create
- // your customized image from.
- SourceImageId string `mapstructure:"source_image_id" required:"true"`
+ // your customized image from.
+ SourceImageId string `mapstructure:"source_image_id" required:"true"`
// The instance type your cvm will be launched by.
- // You should reference Instace Type
- // for parameter taking.
- InstanceType string `mapstructure:"instance_type" required:"true"`
+	// You should reference Instance Type
+ // for parameter taking.
+ InstanceType string `mapstructure:"instance_type" required:"true"`
// Instance name.
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// Root disk type your cvm will be launched by. you could
- // reference Disk Type
- // for parameter taking.
- DiskType string `mapstructure:"disk_type" required:"false"`
+ // reference Disk Type
+ // for parameter taking.
+ DiskType string `mapstructure:"disk_type" required:"false"`
// Root disk size your cvm will be launched by. values range(in GB):
- DiskSize int64 `mapstructure:"disk_size" required:"false"`
+ DiskSize int64 `mapstructure:"disk_size" required:"false"`
// Specify vpc your cvm will be launched by.
- VpcId string `mapstructure:"vpc_id" required:"false"`
+ VpcId string `mapstructure:"vpc_id" required:"false"`
// Specify vpc name you will create. if vpc_id is not set, packer will
- // create a vpc for you named this parameter.
- VpcName string `mapstructure:"vpc_name" required:"false"`
- VpcIp string `mapstructure:"vpc_ip"`
+ // create a vpc for you named this parameter.
+ VpcName string `mapstructure:"vpc_name" required:"false"`
+ VpcIp string `mapstructure:"vpc_ip"`
// Specify subnet your cvm will be launched by.
- SubnetId string `mapstructure:"subnet_id" required:"false"`
+ SubnetId string `mapstructure:"subnet_id" required:"false"`
// Specify subnet name you will create. if subnet_id is not set, packer will
- // create a subnet for you named this parameter.
- SubnetName string `mapstructure:"subnet_name" required:"false"`
+ // create a subnet for you named this parameter.
+ SubnetName string `mapstructure:"subnet_name" required:"false"`
// Specify cider block of the vpc you will create if vpc_id not set
- CidrBlock string `mapstructure:"cidr_block" required:"false"` // 10.0.0.0/16(default), 172.16.0.0/12, 192.168.0.0/16
+ CidrBlock string `mapstructure:"cidr_block" required:"false"` // 10.0.0.0/16(default), 172.16.0.0/12, 192.168.0.0/16
// Specify cider block of the subnet you will create if
- // subnet_id not set
- SubnectCidrBlock string `mapstructure:"subnect_cidr_block" required:"false"`
- InternetChargeType string `mapstructure:"internet_charge_type"`
+ // subnet_id not set
+ SubnectCidrBlock string `mapstructure:"subnect_cidr_block" required:"false"`
+ InternetChargeType string `mapstructure:"internet_charge_type"`
// Max bandwidth out your cvm will be launched by(in MB).
- // values can be set between 1 ~ 100.
- InternetMaxBandwidthOut int64 `mapstructure:"internet_max_bandwidth_out" required:"false"`
+ // values can be set between 1 ~ 100.
+ InternetMaxBandwidthOut int64 `mapstructure:"internet_max_bandwidth_out" required:"false"`
// Specify security group your cvm will be launched by.
- SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
+ SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
// Specify security name you will create if security_group_id not set.
- SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
+ SecurityGroupName string `mapstructure:"security_group_name" required:"false"`
// userdata.
- UserData string `mapstructure:"user_data" required:"false"`
+ UserData string `mapstructure:"user_data" required:"false"`
// userdata file.
- UserDataFile string `mapstructure:"user_data_file" required:"false"`
+ UserDataFile string `mapstructure:"user_data_file" required:"false"`
// host name.
- HostName string `mapstructure:"host_name" required:"false"`
+ HostName string `mapstructure:"host_name" required:"false"`
// Communicator settings
Comm communicator.Config `mapstructure:",squash"`
diff --git a/builder/triton/access_config.go b/builder/triton/access_config.go
index 374fcbed5..2ffaf5e1a 100644
--- a/builder/triton/access_config.go
+++ b/builder/triton/access_config.go
@@ -20,31 +20,31 @@ import (
// AccessConfig is for common configuration related to Triton access
type AccessConfig struct {
// The URL of the Triton cloud API to use. If omitted
- // it will default to the us-sw-1 region of the Joyent Public cloud. If you
- // are using your own private Triton installation you will have to supply the
- // URL of the cloud API of your own Triton installation.
- Endpoint string `mapstructure:"triton_url" required:"false"`
+ // it will default to the us-sw-1 region of the Joyent Public cloud. If you
+ // are using your own private Triton installation you will have to supply the
+ // URL of the cloud API of your own Triton installation.
+ Endpoint string `mapstructure:"triton_url" required:"false"`
// The username of the Triton account to use when
- // using the Triton Cloud API.
- Account string `mapstructure:"triton_account" required:"true"`
+ // using the Triton Cloud API.
+ Account string `mapstructure:"triton_account" required:"true"`
// The username of a user who has access to your
- // Triton account.
- Username string `mapstructure:"triton_user" required:"false"`
+ // Triton account.
+ Username string `mapstructure:"triton_user" required:"false"`
// The fingerprint of the public key of the SSH key
- // pair to use for authentication with the Triton Cloud API. If
- // triton_key_material is not set, it is assumed that the SSH agent has the
- // private key corresponding to this key ID loaded.
- KeyID string `mapstructure:"triton_key_id" required:"true"`
+ // pair to use for authentication with the Triton Cloud API. If
+ // triton_key_material is not set, it is assumed that the SSH agent has the
+ // private key corresponding to this key ID loaded.
+ KeyID string `mapstructure:"triton_key_id" required:"true"`
// Path to the file in which the private key
- // of triton_key_id is stored. For example /home/soandso/.ssh/id_rsa. If
- // this is not specified, the SSH agent is used to sign requests with the
- // triton_key_id specified.
- KeyMaterial string `mapstructure:"triton_key_material" required:"false"`
+ // of triton_key_id is stored. For example /home/soandso/.ssh/id_rsa. If
+ // this is not specified, the SSH agent is used to sign requests with the
+ // triton_key_id specified.
+ KeyMaterial string `mapstructure:"triton_key_material" required:"false"`
//secure_skip_tls_verify - (bool) This allows skipping TLS verification
- // of the Triton endpoint. It is useful when connecting to a temporary Triton
- // installation such as Cloud-On-A-Laptop which does not generally use a
- // certificate signed by a trusted root CA. The default is false.
- InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
+ // of the Triton endpoint. It is useful when connecting to a temporary Triton
+ // installation such as Cloud-On-A-Laptop which does not generally use a
+ // certificate signed by a trusted root CA. The default is false.
+ InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
signer authentication.Signer
}
diff --git a/builder/triton/source_machine_config.go b/builder/triton/source_machine_config.go
index 93c09e4f5..a31688ec5 100644
--- a/builder/triton/source_machine_config.go
+++ b/builder/triton/source_machine_config.go
@@ -12,59 +12,59 @@ import (
// the SDC API in order for provisioning to take place.
type SourceMachineConfig struct {
// Name of the VM used for building the
- // image. Does not affect (and does not have to be the same) as the name for a
- // VM instance running this image. Maximum 512 characters but should in
- // practice be much shorter (think between 5 and 20 characters). For example
- // mysql-64-server-image-builder. When omitted defaults to
- // packer-builder-[image_name].
- MachineName string `mapstructure:"source_machine_name" required:"false"`
+ // image. Does not affect (and does not have to be the same) as the name for a
+ // VM instance running this image. Maximum 512 characters but should in
+ // practice be much shorter (think between 5 and 20 characters). For example
+ // mysql-64-server-image-builder. When omitted defaults to
+ // packer-builder-[image_name].
+ MachineName string `mapstructure:"source_machine_name" required:"false"`
// The Triton package to use while
- // building the image. Does not affect (and does not have to be the same) as
- // the package which will be used for a VM instance running this image. On the
- // Joyent public cloud this could for example be g3-standard-0.5-smartos.
- MachinePackage string `mapstructure:"source_machine_package" required:"true"`
+ // building the image. Does not affect (and does not have to be the same) as
+ // the package which will be used for a VM instance running this image. On the
+ // Joyent public cloud this could for example be g3-standard-0.5-smartos.
+ MachinePackage string `mapstructure:"source_machine_package" required:"true"`
// The UUID of the image to base the new
- // image on. Triton supports multiple types of images, called 'brands' in
- // Triton / Joyent lingo, for contains and VM's. See the chapter Containers
- // and virtual machines in
- // the Joyent Triton documentation for detailed information. The following
- // brands are currently supported by this builder:joyent andkvm. The
- // choice of base image automatically decides the brand. On the Joyent public
- // cloud a valid source_machine_image could for example be
- // 70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit
- // SmartOS base image (a 'joyent' brand image). source_machine_image_filter
- // can be used to populate this UUID.
- MachineImage string `mapstructure:"source_machine_image" required:"true"`
+ // image on. Triton supports multiple types of images, called 'brands' in
+ // Triton / Joyent lingo, for containers and VMs. See the chapter Containers
+ // and virtual machines in
+ // the Joyent Triton documentation for detailed information. The following
+ // brands are currently supported by this builder: joyent and kvm. The
+ // choice of base image automatically decides the brand. On the Joyent public
+ // cloud a valid source_machine_image could for example be
+ // 70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit
+ // SmartOS base image (a 'joyent' brand image). source_machine_image_filter
+ // can be used to populate this UUID.
+ MachineImage string `mapstructure:"source_machine_image" required:"true"`
// The UUID's of Triton
- // networks added to the source machine used for creating the image. For
- // example if any of the provisioners which are run need Internet access you
- // will need to add the UUID's of the appropriate networks here. If this is
- // not specified, instances will be placed into the default Triton public and
- // internal networks.
- MachineNetworks []string `mapstructure:"source_machine_networks" required:"false"`
+ // networks added to the source machine used for creating the image. For
+ // example if any of the provisioners which are run need Internet access you
+ // will need to add the UUID's of the appropriate networks here. If this is
+ // not specified, instances will be placed into the default Triton public and
+ // internal networks.
+ MachineNetworks []string `mapstructure:"source_machine_networks" required:"false"`
// Triton metadata
- // applied to the VM used to create the image. Metadata can be used to pass
- // configuration information to the VM without the need for networking. See
- // Using the metadata
- // API in the
- // Joyent documentation for more information. This can for example be used to
- // set the user-script metadata key to have Triton start a user supplied
- // script after the VM has booted.
- MachineMetadata map[string]string `mapstructure:"source_machine_metadata" required:"false"`
+ // applied to the VM used to create the image. Metadata can be used to pass
+ // configuration information to the VM without the need for networking. See
+ // Using the metadata
+ // API in the
+ // Joyent documentation for more information. This can for example be used to
+ // set the user-script metadata key to have Triton start a user supplied
+ // script after the VM has booted.
+ MachineMetadata map[string]string `mapstructure:"source_machine_metadata" required:"false"`
// Tags applied to the
- // VM used to create the image.
- MachineTags map[string]string `mapstructure:"source_machine_tags" required:"false"`
+ // VM used to create the image.
+ MachineTags map[string]string `mapstructure:"source_machine_tags" required:"false"`
// Whether or not the firewall
- // of the VM used to create an image of is enabled. The Triton firewall only
- // filters inbound traffic to the VM. All outbound traffic is always allowed.
- // Currently this builder does not provide an interface to add specific
- // firewall rules. Unless you have a global rule defined in Triton which
- // allows SSH traffic enabling the firewall will interfere with the SSH
- // provisioner. The default is false.
- MachineFirewallEnabled bool `mapstructure:"source_machine_firewall_enabled" required:"false"`
+ // of the VM used to create an image of is enabled. The Triton firewall only
+ // filters inbound traffic to the VM. All outbound traffic is always allowed.
+ // Currently this builder does not provide an interface to add specific
+ // firewall rules. Unless you have a global rule defined in Triton which
+ // allows SSH traffic enabling the firewall will interfere with the SSH
+ // provisioner. The default is false.
+ MachineFirewallEnabled bool `mapstructure:"source_machine_firewall_enabled" required:"false"`
// Filters used to populate the
- // source_machine_image field. Example:
- MachineImageFilters MachineImageFilter `mapstructure:"source_machine_image_filter" required:"false"`
+ // source_machine_image field. Example:
+ MachineImageFilters MachineImageFilter `mapstructure:"source_machine_image_filter" required:"false"`
}
type MachineImageFilter struct {
diff --git a/builder/triton/target_image_config.go b/builder/triton/target_image_config.go
index 6ee195428..a0a5c73ef 100644
--- a/builder/triton/target_image_config.go
+++ b/builder/triton/target_image_config.go
@@ -12,30 +12,30 @@ import (
// from the source machine.
type TargetImageConfig struct {
// The name the finished image in Triton will be
- // assigned. Maximum 512 characters but should in practice be much shorter
- // (think between 5 and 20 characters). For example postgresql-95-server for
- // an image used as a PostgreSQL 9.5 server.
- ImageName string `mapstructure:"image_name" required:"true"`
+ // assigned. Maximum 512 characters but should in practice be much shorter
+ // (think between 5 and 20 characters). For example postgresql-95-server for
+ // an image used as a PostgreSQL 9.5 server.
+ ImageName string `mapstructure:"image_name" required:"true"`
// The version string for this image. Maximum 128
- // characters. Any string will do but a format of Major.Minor.Patch is
- // strongly advised by Joyent. See Semantic Versioning
- // for more information on the Major.Minor.Patch versioning format.
- ImageVersion string `mapstructure:"image_version" required:"true"`
+ // characters. Any string will do but a format of Major.Minor.Patch is
+ // strongly advised by Joyent. See Semantic Versioning
+ // for more information on the Major.Minor.Patch versioning format.
+ ImageVersion string `mapstructure:"image_version" required:"true"`
// Description of the image. Maximum 512
- // characters.
- ImageDescription string `mapstructure:"image_description" required:"false"`
+ // characters.
+ ImageDescription string `mapstructure:"image_description" required:"false"`
// URL of the homepage where users can find
- // information about the image. Maximum 128 characters.
- ImageHomepage string `mapstructure:"image_homepage" required:"false"`
+ // information about the image. Maximum 128 characters.
+ ImageHomepage string `mapstructure:"image_homepage" required:"false"`
// URL of the End User License Agreement (EULA)
- // for the image. Maximum 128 characters.
- ImageEULA string `mapstructure:"image_eula_url" required:"false"`
+ // for the image. Maximum 128 characters.
+ ImageEULA string `mapstructure:"image_eula_url" required:"false"`
// The UUID's of the users which will have
- // access to this image. When omitted only the owner (the Triton user whose
- // credentials are used) will have access to the image.
- ImageACL []string `mapstructure:"image_acls" required:"false"`
+ // access to this image. When omitted only the owner (the Triton user whose
+ // credentials are used) will have access to the image.
+ ImageACL []string `mapstructure:"image_acls" required:"false"`
// Tag applied to the image.
- ImageTags map[string]string `mapstructure:"image_tags" required:"false"`
+ ImageTags map[string]string `mapstructure:"image_tags" required:"false"`
}
// Prepare performs basic validation on a TargetImageConfig struct.
diff --git a/builder/virtualbox/common/export_config.go b/builder/virtualbox/common/export_config.go
index af38c0d78..9a66626ce 100644
--- a/builder/virtualbox/common/export_config.go
+++ b/builder/virtualbox/common/export_config.go
@@ -10,7 +10,7 @@ import (
type ExportConfig struct {
// Either ovf or ova, this specifies the output format
- // of the exported virtual machine. This defaults to ovf.
+ // of the exported virtual machine. This defaults to ovf.
Format string `mapstructure:"format" required:"false"`
}
diff --git a/builder/virtualbox/common/export_opts.go b/builder/virtualbox/common/export_opts.go
index 3a802a314..b5a22072c 100644
--- a/builder/virtualbox/common/export_opts.go
+++ b/builder/virtualbox/common/export_opts.go
@@ -8,10 +8,10 @@ import (
type ExportOpts struct {
// Additional options to pass to the
- // VBoxManage
- // export. This
- // can be useful for passing product information to include in the resulting
- // appliance file. Packer JSON configuration file example:
+ // VBoxManage
+ // export. This
+ // can be useful for passing product information to include in the resulting
+ // appliance file. Packer JSON configuration file example:
ExportOpts []string `mapstructure:"export_opts" required:"false"`
}
diff --git a/builder/virtualbox/common/guest_additions_config.go b/builder/virtualbox/common/guest_additions_config.go
index 082d10b21..e0f015dba 100644
--- a/builder/virtualbox/common/guest_additions_config.go
+++ b/builder/virtualbox/common/guest_additions_config.go
@@ -17,14 +17,14 @@ const (
)
type GuestAdditionsConfig struct {
- Communicator string `mapstructure:"communicator"`
+ Communicator string `mapstructure:"communicator"`
// The method by which guest additions are
- // made available to the guest for installation. Valid options are upload,
- // attach, or disable. If the mode is attach the guest additions ISO will
- // be attached as a CD device to the virtual machine. If the mode is upload
- // the guest additions ISO will be uploaded to the path specified by
- // guest_additions_path. The default value is upload. If disable is used,
- // guest additions won't be downloaded, either.
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
}
diff --git a/builder/virtualbox/common/hw_config.go b/builder/virtualbox/common/hw_config.go
index 2f7d6ebfe..739bc1850 100644
--- a/builder/virtualbox/common/hw_config.go
+++ b/builder/virtualbox/common/hw_config.go
@@ -10,18 +10,18 @@ import (
type HWConfig struct {
// The number of cpus to use for building the VM.
- // Defaults to 1.
- CpuCount int `mapstructure:"cpus" required:"false"`
+ // Defaults to 1.
+ CpuCount int `mapstructure:"cpus" required:"false"`
// The amount of memory to use for building the VM
- // in megabytes. Defaults to 512 megabytes.
+ // in megabytes. Defaults to 512 megabytes.
MemorySize int `mapstructure:"memory" required:"false"`
// Defaults to none. The type of audio device to use for
- // sound when building the VM. Some of the options that are available are
- // dsound, oss, alsa, pulse, coreaudio, null.
+ // sound when building the VM. Some of the options that are available are
+ // dsound, oss, alsa, pulse, coreaudio, null.
Sound string `mapstructure:"sound" required:"false"`
// Specifies whether or not to enable the USB bus when
- // building the VM. Defaults to false.
- USB bool `mapstructure:"usb" required:"false"`
+ // building the VM. Defaults to false.
+ USB bool `mapstructure:"usb" required:"false"`
}
func (c *HWConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/virtualbox/common/output_config.go b/builder/virtualbox/common/output_config.go
index 6fa30ebbc..e99919dcb 100644
--- a/builder/virtualbox/common/output_config.go
+++ b/builder/virtualbox/common/output_config.go
@@ -11,11 +11,11 @@ import (
type OutputConfig struct {
// This is the path to the directory where the
- // resulting virtual machine will be created. This may be relative or absolute.
- // If relative, the path is relative to the working directory when packer
- // is executed. This directory must not exist or be empty prior to running
- // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
- // name of the build.
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
OutputDir string `mapstructure:"output_directory" required:"false"`
}
diff --git a/builder/virtualbox/common/run_config.go b/builder/virtualbox/common/run_config.go
index 331d3d195..14c2bf923 100644
--- a/builder/virtualbox/common/run_config.go
+++ b/builder/virtualbox/common/run_config.go
@@ -10,20 +10,20 @@ import (
type RunConfig struct {
// Packer defaults to building VirtualBox virtual
- // machines by launching a GUI that shows the console of the machine
- // being built. When this value is set to true, the machine will start
- // without a console.
+ // machines by launching a GUI that shows the console of the machine
+ // being built. When this value is set to true, the machine will start
+ // without a console.
Headless bool `mapstructure:"headless" required:"false"`
// The IP address that should be
- // binded to for VRDP. By default packer will use 127.0.0.1 for this. If you
- // wish to bind to all interfaces use 0.0.0.0.
+ // bound to for VRDP. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
VRDPBindAddress string `mapstructure:"vrdp_bind_address" required:"false"`
// The minimum and maximum port
- // to use for VRDP access to the virtual machine. Packer uses a randomly chosen
- // port in this range that appears available. By default this is 5900 to
- // 6000. The minimum and maximum ports are inclusive.
- VRDPPortMin int `mapstructure:"vrdp_port_min" required:"false"`
- VRDPPortMax int `mapstructure:"vrdp_port_max"`
+ // to use for VRDP access to the virtual machine. Packer uses a randomly chosen
+ // port in this range that appears available. By default this is 5900 to
+ // 6000. The minimum and maximum ports are inclusive.
+ VRDPPortMin int `mapstructure:"vrdp_port_min" required:"false"`
+ VRDPPortMax int `mapstructure:"vrdp_port_max"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) (errs []error) {
diff --git a/builder/virtualbox/common/shutdown_config.go b/builder/virtualbox/common/shutdown_config.go
index 85252ed7a..63b58d8a5 100644
--- a/builder/virtualbox/common/shutdown_config.go
+++ b/builder/virtualbox/common/shutdown_config.go
@@ -11,22 +11,22 @@ import (
type ShutdownConfig struct {
// The command to use to gracefully shut down the
- // machine once all the provisioning is done. By default this is an empty
- // string, which tells Packer to just forcefully shut down the machine unless a
- // shutdown command takes place inside script so this may safely be omitted. If
- // one or more scripts require a reboot it is suggested to leave this blank
- // since reboots may fail and specify the final shutdown command in your
- // last script.
- ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine unless a
+ // shutdown command takes place inside script so this may safely be omitted. If
+ // one or more scripts require a reboot it is suggested to leave this blank
+ // since reboots may fail and specify the final shutdown command in your
+ // last script.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
// The amount of time to wait after executing the
- // shutdown_command for the virtual machine to actually shut down. If it
- // doesn't shut down in this time, it is an error. By default, the timeout is
- // 5m or five minutes.
- RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
+ RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
// The amount of time to wait after shutting
- // down the virtual machine. If you get the error
- // Error removing floppy controller, you might need to set this to 5m
- // or so. By default, the delay is 0s or disabled.
+ // down the virtual machine. If you get the error
+ // Error removing floppy controller, you might need to set this to 5m
+ // or so. By default, the delay is 0s or disabled.
RawPostShutdownDelay string `mapstructure:"post_shutdown_delay" required:"false"`
ShutdownTimeout time.Duration ``
diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go
index c78008be8..074415535 100644
--- a/builder/virtualbox/common/ssh_config.go
+++ b/builder/virtualbox/common/ssh_config.go
@@ -13,15 +13,15 @@ import (
type SSHConfig struct {
Comm communicator.Config `mapstructure:",squash"`
// The minimum and
- // maximum port to use for the SSH port on the host machine which is forwarded
- // to the SSH port on the guest machine. Because Packer often runs in parallel,
- // Packer will choose a randomly available port in this range to use as the
- // host port. By default this is 2222 to 4444.
- SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
- SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
+ // maximum port to use for the SSH port on the host machine which is forwarded
+ // to the SSH port on the guest machine. Because Packer often runs in parallel,
+ // Packer will choose a randomly available port in this range to use as the
+ // host port. By default this is 2222 to 4444.
+ SSHHostPortMin int `mapstructure:"ssh_host_port_min" required:"false"`
+ SSHHostPortMax int `mapstructure:"ssh_host_port_max"`
// Defaults to false. When enabled, Packer
- // does not setup forwarded port mapping for SSH requests and uses ssh_port
- // on the host to communicate to the virtual machine.
+ // does not setup forwarded port mapping for SSH requests and uses ssh_port
+ // on the host to communicate to the virtual machine.
SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping" required:"false"`
// These are deprecated, but we keep them around for BC
diff --git a/builder/virtualbox/common/vbox_version_config.go b/builder/virtualbox/common/vbox_version_config.go
index 4d911f48e..4e81f4fa0 100644
--- a/builder/virtualbox/common/vbox_version_config.go
+++ b/builder/virtualbox/common/vbox_version_config.go
@@ -9,13 +9,13 @@ import (
)
type VBoxVersionConfig struct {
- Communicator string `mapstructure:"communicator"`
+ Communicator string `mapstructure:"communicator"`
// The path within the virtual machine to
- // upload a file that contains the VirtualBox version that was used to create
- // the machine. This information can be useful for provisioning. By default
- // this is .vbox_version, which will generally be upload it into the
- // home directory. Set to an empty string to skip uploading this file, which
- // can be useful when using the none communicator.
+ // upload a file that contains the VirtualBox version that was used to create
+ // the machine. This information can be useful for provisioning. By default
+ // this is .vbox_version, which will generally be uploaded into the
+ // home directory. Set to an empty string to skip uploading this file, which
+ // can be useful when using the none communicator.
VBoxVersionFile *string `mapstructure:"virtualbox_version_file" required:"false"`
}
diff --git a/builder/virtualbox/common/vboxbundle_config.go b/builder/virtualbox/common/vboxbundle_config.go
index 586609987..8936eacf2 100644
--- a/builder/virtualbox/common/vboxbundle_config.go
+++ b/builder/virtualbox/common/vboxbundle_config.go
@@ -8,9 +8,9 @@ import (
type VBoxBundleConfig struct {
// Defaults to false. When enabled, Packer includes
- // any attached ISO disc devices into the final virtual machine. Useful for
- // some live distributions that require installation media to continue to be
- // attached after installation.
+ // any attached ISO disc devices into the final virtual machine. Useful for
+ // some live distributions that require installation media to continue to be
+ // attached after installation.
BundleISO bool `mapstructure:"bundle_iso" required:"false"`
}
diff --git a/builder/virtualbox/common/vboxmanage_config.go b/builder/virtualbox/common/vboxmanage_config.go
index 7027ead1f..c58f89601 100644
--- a/builder/virtualbox/common/vboxmanage_config.go
+++ b/builder/virtualbox/common/vboxmanage_config.go
@@ -8,15 +8,15 @@ import (
type VBoxManageConfig struct {
// Custom VBoxManage commands to
- // execute in order to further customize the virtual machine being created. The
- // value of this is an array of commands to execute. The commands are executed
- // in the order defined in the template. For each command, the command is
- // defined itself as an array of strings, where each string represents a single
- // argument on the command-line to VBoxManage (but excluding
- // VBoxManage itself). Each arg is treated as a configuration
- // template, where the Name
- // variable is replaced with the VM name. More details on how to use
- // VBoxManage are below.
+ // execute in order to further customize the virtual machine being created. The
+ // value of this is an array of commands to execute. The commands are executed
+ // in the order defined in the template. For each command, the command is
+ // defined itself as an array of strings, where each string represents a single
+ // argument on the command-line to VBoxManage (but excluding
+ // VBoxManage itself). Each arg is treated as a configuration
+ // template, where the Name
+ // variable is replaced with the VM name. More details on how to use
+ // VBoxManage are below.
VBoxManage [][]string `mapstructure:"vboxmanage" required:"false"`
}
diff --git a/builder/virtualbox/common/vboxmanage_post_config.go b/builder/virtualbox/common/vboxmanage_post_config.go
index 890a3075d..69b69933e 100644
--- a/builder/virtualbox/common/vboxmanage_post_config.go
+++ b/builder/virtualbox/common/vboxmanage_post_config.go
@@ -8,8 +8,8 @@ import (
type VBoxManagePostConfig struct {
// Identical to vboxmanage,
- // except that it is run after the virtual machine is shutdown, and before the
- // virtual machine is exported.
+ // except that it is run after the virtual machine is shutdown, and before the
+ // virtual machine is exported.
VBoxManagePost [][]string `mapstructure:"vboxmanage_post" required:"false"`
}
diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go
index e16873b24..167a5bebc 100644
--- a/builder/virtualbox/iso/builder.go
+++ b/builder/virtualbox/iso/builder.go
@@ -44,80 +44,80 @@ type Config struct {
vboxcommon.VBoxBundleConfig `mapstructure:",squash"`
vboxcommon.GuestAdditionsConfig `mapstructure:",squash"`
// The size, in megabytes, of the hard disk to create
- // for the VM. By default, this is 40000 (about 40 GB).
- DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // for the VM. By default, this is 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
// The method by which guest additions are
- // made available to the guest for installation. Valid options are upload,
- // attach, or disable. If the mode is attach the guest additions ISO will
- // be attached as a CD device to the virtual machine. If the mode is upload
- // the guest additions ISO will be uploaded to the path specified by
- // guest_additions_path. The default value is upload. If disable is used,
- // guest additions won't be downloaded, either.
- GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
// The path on the guest virtual machine
- // where the VirtualBox guest additions ISO will be uploaded. By default this
- // is VBoxGuestAdditions.iso which should upload into the login directory of
- // the user. This is a configuration
- // template where the Version
- // variable is replaced with the VirtualBox version.
- GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
+ // where the VirtualBox guest additions ISO will be uploaded. By default this
+ // is VBoxGuestAdditions.iso which should upload into the login directory of
+ // the user. This is a configuration
+ // template where the Version
+ // variable is replaced with the VirtualBox version.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
// The SHA256 checksum of the guest
- // additions ISO that will be uploaded to the guest VM. By default the
- // checksums will be downloaded from the VirtualBox website, so this only needs
- // to be set if you want to be explicit about the checksum.
- GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
+ // additions ISO that will be uploaded to the guest VM. By default the
+ // checksums will be downloaded from the VirtualBox website, so this only needs
+ // to be set if you want to be explicit about the checksum.
+ GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
// The URL to the guest additions ISO
- // to upload. This can also be a file URL if the ISO is at a local path. By
- // default, the VirtualBox builder will attempt to find the guest additions ISO
- // on the local file system. If it is not available locally, the builder will
- // download the proper guest additions ISO from the internet.
- GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
+ // to upload. This can also be a file URL if the ISO is at a local path. By
+ // default, the VirtualBox builder will attempt to find the guest additions ISO
+ // on the local file system. If it is not available locally, the builder will
+ // download the proper guest additions ISO from the internet.
+ GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
// The interface type to use to mount
- // guest additions when guest_additions_mode is set to attach. Will
- // default to the value set in iso_interface, if iso_interface is set.
- // Will default to "ide", if iso_interface is not set. Options are "ide" and
- // "sata".
+ // guest additions when guest_additions_mode is set to attach. Will
+ // default to the value set in iso_interface, if iso_interface is set.
+ // Will default to "ide", if iso_interface is not set. Options are "ide" and
+ // "sata".
GuestAdditionsInterface string `mapstructure:"guest_additions_interface" required:"false"`
// The guest OS type being installed. By default
- // this is other, but you can get dramatic performance improvements by
- // setting this to the proper value. To view all available values for this run
- // VBoxManage list ostypes. Setting the correct value hints to VirtualBox how
- // to optimize the virtual hardware to work best with that operating system.
- GuestOSType string `mapstructure:"guest_os_type" required:"false"`
+ // this is other, but you can get dramatic performance improvements by
+ // setting this to the proper value. To view all available values for this run
+ // VBoxManage list ostypes. Setting the correct value hints to VirtualBox how
+ // to optimize the virtual hardware to work best with that operating system.
+ GuestOSType string `mapstructure:"guest_os_type" required:"false"`
// When this value is set to true, a VDI
- // image will be shrunk in response to the trim command from the guest OS.
- // The size of the cleared area must be at least 1MB. Also set
- // hard_drive_nonrotational to true to enable TRIM support.
- HardDriveDiscard bool `mapstructure:"hard_drive_discard" required:"false"`
+ // image will be shrunk in response to the trim command from the guest OS.
+ // The size of the cleared area must be at least 1MB. Also set
+ // hard_drive_nonrotational to true to enable TRIM support.
+ HardDriveDiscard bool `mapstructure:"hard_drive_discard" required:"false"`
// The type of controller that the primary
- // hard drive is attached to, defaults to ide. When set to sata, the drive
- // is attached to an AHCI SATA controller. When set to scsi, the drive is
- // attached to an LsiLogic SCSI controller.
- HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
+ // hard drive is attached to, defaults to ide. When set to sata, the drive
+ // is attached to an AHCI SATA controller. When set to scsi, the drive is
+ // attached to an LsiLogic SCSI controller.
+ HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
// The number of ports available on any SATA
- // controller created, defaults to 1. VirtualBox supports up to 30 ports on a
- // maximum of 1 SATA controller. Increasing this value can be useful if you
- // want to attach additional drives.
- SATAPortCount int `mapstructure:"sata_port_count" required:"false"`
+ // controller created, defaults to 1. VirtualBox supports up to 30 ports on a
+ // maximum of 1 SATA controller. Increasing this value can be useful if you
+ // want to attach additional drives.
+ SATAPortCount int `mapstructure:"sata_port_count" required:"false"`
// Forces some guests (i.e. Windows 7+)
- // to treat disks as SSDs and stops them from performing disk fragmentation.
- // Also set hard_drive_discard to true to enable TRIM support.
- HardDriveNonrotational bool `mapstructure:"hard_drive_nonrotational" required:"false"`
+ // to treat disks as SSDs and stops them from performing disk fragmentation.
+ // Also set hard_drive_discard to true to enable TRIM support.
+ HardDriveNonrotational bool `mapstructure:"hard_drive_nonrotational" required:"false"`
// The type of controller that the ISO is attached
- // to, defaults to ide. When set to sata, the drive is attached to an AHCI
- // SATA controller.
- ISOInterface string `mapstructure:"iso_interface" required:"false"`
+ // to, defaults to ide. When set to sata, the drive is attached to an AHCI
+ // SATA controller.
+ ISOInterface string `mapstructure:"iso_interface" required:"false"`
// Set this to true if you would like to keep
- // the VM registered with virtualbox. Defaults to false.
- KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // the VM registered with virtualbox. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
// Defaults to false. When enabled, Packer will
- // not export the VM. Useful if the build output is not the resultant image,
- // but created inside the VM.
- SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // not export the VM. Useful if the build output is not the resultant image,
+ // but created inside the VM.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
// This is the name of the OVF file for the new virtual
- // machine, without the file extension. By default this is packer-BUILDNAME,
- // where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go
index c8b2bf0d2..586ae50ae 100644
--- a/builder/virtualbox/ovf/config.go
+++ b/builder/virtualbox/ovf/config.go
@@ -31,77 +31,77 @@ type Config struct {
vboxcommon.VBoxManagePostConfig `mapstructure:",squash"`
vboxcommon.VBoxVersionConfig `mapstructure:",squash"`
vboxcommon.GuestAdditionsConfig `mapstructure:",squash"`
- // The checksum for the source_path file. The
- // algorithm to use when computing the checksum can be optionally specified
- // with checksum_type. When checksum_type is not set packer will guess the
- // checksumming type based on checksum length. checksum can be also be a
- // file or an URL, in which case checksum_type must be set to file; the
- // go-getter will download it and use the first hash found.
- Checksum string `mapstructure:"checksum" required:"true"`
+ // The checksum for the source_path file. The
+ // algorithm to use when computing the checksum can be optionally specified
+ // with checksum_type. When checksum_type is not set packer will guess the
+ // checksumming type based on checksum length. checksum can also be a
+ // file or an URL, in which case checksum_type must be set to file; the
+ // go-getter will download it and use the first hash found.
+ Checksum string `mapstructure:"checksum" required:"true"`
// The type of the checksum specified in checksum.
- // Valid values are none, md5, sha1, sha256, or sha512. Although the
- // checksum will not be verified when checksum_type is set to "none", this is
- // not recommended since OVA files can be very large and corruption does happen
- // from time to time.
- ChecksumType string `mapstructure:"checksum_type" required:"false"`
+ // Valid values are none, md5, sha1, sha256, or sha512. Although the
+ // checksum will not be verified when checksum_type is set to "none", this is
+ // not recommended since OVA files can be very large and corruption does happen
+ // from time to time.
+ ChecksumType string `mapstructure:"checksum_type" required:"false"`
// The method by which guest additions are
- // made available to the guest for installation. Valid options are upload,
- // attach, or disable. If the mode is attach the guest additions ISO will
- // be attached as a CD device to the virtual machine. If the mode is upload
- // the guest additions ISO will be uploaded to the path specified by
- // guest_additions_path. The default value is upload. If disable is used,
- // guest additions won't be downloaded, either.
- GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
+ // made available to the guest for installation. Valid options are upload,
+ // attach, or disable. If the mode is attach the guest additions ISO will
+ // be attached as a CD device to the virtual machine. If the mode is upload
+ // the guest additions ISO will be uploaded to the path specified by
+ // guest_additions_path. The default value is upload. If disable is used,
+ // guest additions won't be downloaded, either.
+ GuestAdditionsMode string `mapstructure:"guest_additions_mode" required:"false"`
// The path on the guest virtual machine
- // where the VirtualBox guest additions ISO will be uploaded. By default this
- // is VBoxGuestAdditions.iso which should upload into the login directory of
- // the user. This is a configuration
- // template where the Version
- // variable is replaced with the VirtualBox version.
- GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
+ // where the VirtualBox guest additions ISO will be uploaded. By default this
+ // is VBoxGuestAdditions.iso which should upload into the login directory of
+ // the user. This is a configuration
+ // template where the Version
+ // variable is replaced with the VirtualBox version.
+ GuestAdditionsPath string `mapstructure:"guest_additions_path" required:"false"`
// The interface type to use to mount
- // guest additions when guest_additions_mode is set to attach. Will
- // default to the value set in iso_interface, if iso_interface is set.
- // Will default to "ide", if iso_interface is not set. Options are "ide" and
- // "sata".
- GuestAdditionsInterface string `mapstructure:"guest_additions_interface" required:"false"`
+ // guest additions when guest_additions_mode is set to attach. Will
+ // default to the value set in iso_interface, if iso_interface is set.
+ // Will default to "ide", if iso_interface is not set. Options are "ide" and
+ // "sata".
+ GuestAdditionsInterface string `mapstructure:"guest_additions_interface" required:"false"`
// The SHA256 checksum of the guest
- // additions ISO that will be uploaded to the guest VM. By default the
- // checksums will be downloaded from the VirtualBox website, so this only needs
- // to be set if you want to be explicit about the checksum.
- GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
+ // additions ISO that will be uploaded to the guest VM. By default the
+ // checksums will be downloaded from the VirtualBox website, so this only needs
+ // to be set if you want to be explicit about the checksum.
+ GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256" required:"false"`
// The URL to the guest additions ISO
- // to upload. This can also be a file URL if the ISO is at a local path. By
- // default, the VirtualBox builder will attempt to find the guest additions ISO
- // on the local file system. If it is not available locally, the builder will
- // download the proper guest additions ISO from the internet.
- GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
+ // to upload. This can also be a file URL if the ISO is at a local path. By
+ // default, the VirtualBox builder will attempt to find the guest additions ISO
+ // on the local file system. If it is not available locally, the builder will
+ // download the proper guest additions ISO from the internet.
+ GuestAdditionsURL string `mapstructure:"guest_additions_url" required:"false"`
// Additional flags to pass to
- // VBoxManage import. This can be used to add additional command-line flags
- // such as --eula-accept to accept a EULA in the OVF.
- ImportFlags []string `mapstructure:"import_flags" required:"false"`
+ // VBoxManage import. This can be used to add additional command-line flags
+ // such as --eula-accept to accept a EULA in the OVF.
+ ImportFlags []string `mapstructure:"import_flags" required:"false"`
// Additional options to pass to the
- // VBoxManage import. This can be useful for passing keepallmacs or
- // keepnatmacs options for existing ovf images.
- ImportOpts string `mapstructure:"import_opts" required:"false"`
+ // VBoxManage import. This can be useful for passing keepallmacs or
+ // keepnatmacs options for existing ovf images.
+ ImportOpts string `mapstructure:"import_opts" required:"false"`
// The path to an OVF or OVA file that acts as the
- // source of this build. This currently must be a local file.
- SourcePath string `mapstructure:"source_path" required:"true"`
+ // source of this build. This currently must be a local file.
+ SourcePath string `mapstructure:"source_path" required:"true"`
// The path where the OVA should be saved
- // after download. By default, it will go in the packer cache, with a hash of
- // the original filename as its name.
- TargetPath string `mapstructure:"target_path" required:"false"`
+ // after download. By default, it will go in the packer cache, with a hash of
+ // the original filename as its name.
+ TargetPath string `mapstructure:"target_path" required:"false"`
// This is the name of the OVF file for the new virtual
- // machine, without the file extension. By default this is packer-BUILDNAME,
- // where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
// Set this to true if you would like to keep
- // the VM registered with virtualbox. Defaults to false.
- KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // the VM registered with virtualbox. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
// Defaults to false. When enabled, Packer will
- // not export the VM. Useful if the build output is not the resultant image,
- // but created inside the VM.
- SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // not export the VM. Useful if the build output is not the resultant image,
+ // but created inside the VM.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/vmware/common/driver_config.go b/builder/vmware/common/driver_config.go
index df8258c69..bee758f73 100644
--- a/builder/vmware/common/driver_config.go
+++ b/builder/vmware/common/driver_config.go
@@ -17,41 +17,41 @@ import (
type DriverConfig struct {
// Path to "VMware Fusion.app". By default this is
- // /Applications/VMware Fusion.app but this setting allows you to
- // customize this.
- FusionAppPath string `mapstructure:"fusion_app_path" required:"false"`
+ // /Applications/VMware Fusion.app but this setting allows you to
+ // customize this.
+ FusionAppPath string `mapstructure:"fusion_app_path" required:"false"`
// The type of remote machine that will be used to
- // build this VM rather than a local desktop product. The only value accepted
- // for this currently is esx5. If this is not set, a desktop product will
- // be used. By default, this is not set.
- RemoteType string `mapstructure:"remote_type" required:"false"`
+ // build this VM rather than a local desktop product. The only value accepted
+ // for this currently is esx5. If this is not set, a desktop product will
+ // be used. By default, this is not set.
+ RemoteType string `mapstructure:"remote_type" required:"false"`
// The path to the datastore where the VM will be stored
- // on the ESXi machine.
- RemoteDatastore string `mapstructure:"remote_datastore" required:"false"`
+ // on the ESXi machine.
+ RemoteDatastore string `mapstructure:"remote_datastore" required:"false"`
// The path to the datastore where supporting files
- // will be stored during the build on the remote machine.
- RemoteCacheDatastore string `mapstructure:"remote_cache_datastore" required:"false"`
+ // will be stored during the build on the remote machine.
+ RemoteCacheDatastore string `mapstructure:"remote_cache_datastore" required:"false"`
// The path where the ISO and/or floppy files will
- // be stored during the build on the remote machine. The path is relative to
- // the remote_cache_datastore on the remote machine.
- RemoteCacheDirectory string `mapstructure:"remote_cache_directory" required:"false"`
+ // be stored during the build on the remote machine. The path is relative to
+ // the remote_cache_datastore on the remote machine.
+ RemoteCacheDirectory string `mapstructure:"remote_cache_directory" required:"false"`
// The host of the remote machine used for access.
- // This is only required if remote_type is enabled.
- RemoteHost string `mapstructure:"remote_host" required:"false"`
+ // This is only required if remote_type is enabled.
+ RemoteHost string `mapstructure:"remote_host" required:"false"`
// The SSH port of the remote machine
- RemotePort int `mapstructure:"remote_port" required:"false"`
+ RemotePort int `mapstructure:"remote_port" required:"false"`
// The SSH username used to access the remote machine.
- RemoteUser string `mapstructure:"remote_username" required:"false"`
+ RemoteUser string `mapstructure:"remote_username" required:"false"`
// The SSH password for access to the remote machine.
- RemotePassword string `mapstructure:"remote_password" required:"false"`
+ RemotePassword string `mapstructure:"remote_password" required:"false"`
// The SSH key for access to the remote machine.
- RemotePrivateKey string `mapstructure:"remote_private_key_file" required:"false"`
+ RemotePrivateKey string `mapstructure:"remote_private_key_file" required:"false"`
// When Packer is preparing to run a
- // remote esxi build, and export is not disable, by default it runs a no-op
- // ovftool command to make sure that the remote_username and remote_password
- // given are valid. If you set this flag to true, Packer will skip this
- // validation. Default: false.
- SkipValidateCredentials bool `mapstructure:"skip_validate_credentials" required:"false"`
+ // remote esxi build, and export is not disabled, by default it runs a no-op
+ // ovftool command to make sure that the remote_username and remote_password
+ // given are valid. If you set this flag to true, Packer will skip this
+ // validation. Default: false.
+ SkipValidateCredentials bool `mapstructure:"skip_validate_credentials" required:"false"`
}
func (c *DriverConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/export_config.go b/builder/vmware/common/export_config.go
index 6c90cea2d..edd5fa87c 100644
--- a/builder/vmware/common/export_config.go
+++ b/builder/vmware/common/export_config.go
@@ -10,44 +10,44 @@ import (
type ExportConfig struct {
// Either "ovf", "ova" or "vmx", this specifies the output
- // format of the exported virtual machine. This defaults to "ovf".
- // Before using this option, you need to install ovftool. This option
- // currently only works when option remote_type is set to "esx5".
- // Since ovftool is only capable of password based authentication
- // remote_password must be set when exporting the VM.
- Format string `mapstructure:"format" required:"false"`
+ // format of the exported virtual machine. This defaults to "ovf".
+ // Before using this option, you need to install ovftool. This option
+ // currently only works when option remote_type is set to "esx5".
+ // Since ovftool is only capable of password based authentication
+ // remote_password must be set when exporting the VM.
+ Format string `mapstructure:"format" required:"false"`
// Extra options to pass to ovftool
- // during export. Each item in the array is a new argument. The options
- // --noSSLVerify, --skipManifestCheck, and --targetType are reserved,
- // and should not be passed to this argument.
- // Currently, exporting the build VM (with ovftool) is only supported when
- // building on ESXi e.g. when remote_type is set to esx5. See the
- // Building on a Remote vSphere
- // Hypervisor
- // section below for more info.
+ // during export. Each item in the array is a new argument. The options
+ // --noSSLVerify, --skipManifestCheck, and --targetType are reserved,
+ // and should not be passed to this argument.
+ // Currently, exporting the build VM (with ovftool) is only supported when
+ // building on ESXi e.g. when remote_type is set to esx5. See the
+ // Building on a Remote vSphere
+ // Hypervisor
+ // section below for more info.
OVFToolOptions []string `mapstructure:"ovftool_options" required:"false"`
// Defaults to false. When enabled, Packer will
- // not export the VM. Useful if the build output is not the resultant
- // image, but created inside the VM.
- // Currently, exporting the build VM is only supported when building on
- // ESXi e.g. when remote_type is set to esx5. See the Building on a
- // Remote vSphere
- // Hypervisor
- // section below for more info.
- SkipExport bool `mapstructure:"skip_export" required:"false"`
+ // not export the VM. Useful if the build output is not the resultant
+ // image, but created inside the VM.
+ // Currently, exporting the build VM is only supported when building on
+ // ESXi e.g. when remote_type is set to esx5. See the Building on a
+ // Remote vSphere
+ // Hypervisor
+ // section below for more info.
+ SkipExport bool `mapstructure:"skip_export" required:"false"`
// Set this to true if you would like to keep
- // the VM registered with the remote ESXi server. If you do not need to export
- // the vm, then also set skip_export: true in order to avoid an unnecessary
- // step of using ovftool to export the vm. Defaults to false.
- KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
+ // the VM registered with the remote ESXi server. If you do not need to export
+ // the vm, then also set skip_export: true in order to avoid an unnecessary
+ // step of using ovftool to export the vm. Defaults to false.
+ KeepRegistered bool `mapstructure:"keep_registered" required:"false"`
// VMware-created disks are defragmented and
- // compacted at the end of the build process using vmware-vdiskmanager or
- // vmkfstools in ESXi. In certain rare cases, this might actually end up
- // making the resulting disks slightly larger. If you find this to be the case,
- // you can disable compaction using this configuration value. Defaults to
- // false. Default to true for ESXi when disk_type_id is not explicitly
- // defined and false otherwise.
- SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
+ // compacted at the end of the build process using vmware-vdiskmanager or
+ // vmkfstools in ESXi. In certain rare cases, this might actually end up
+ // making the resulting disks slightly larger. If you find this to be the case,
+ // you can disable compaction using this configuration value. Defaults to
+ // false. Defaults to true for ESXi when disk_type_id is not explicitly
+ // defined and false otherwise.
+ SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
}
func (c *ExportConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/hw_config.go b/builder/vmware/common/hw_config.go
index 45e7189d3..bce8e11fb 100644
--- a/builder/vmware/common/hw_config.go
+++ b/builder/vmware/common/hw_config.go
@@ -13,40 +13,40 @@ import (
type HWConfig struct {
// The number of cpus to use when building the VM.
- CpuCount int `mapstructure:"cpus" required:"false"`
+ CpuCount int `mapstructure:"cpus" required:"false"`
// The amount of memory to use when building the VM
- // in megabytes.
+ // in megabytes.
MemorySize int `mapstructure:"memory" required:"false"`
// The number of cores per socket to use when building the VM.
- // This corresponds to the cpuid.coresPerSocket option in the .vmx file.
- CoreCount int `mapstructure:"cores" required:"false"`
+ // This corresponds to the cpuid.coresPerSocket option in the .vmx file.
+ CoreCount int `mapstructure:"cores" required:"false"`
// This is the network type that the virtual machine will
- // be created with. This can be one of the generic values that map to a device
- // such as hostonly, nat, or bridged. If the network is not one of these
- // values, then it is assumed to be a VMware network device. (VMnet0..x)
- Network string `mapstructure:"network" required:"false"`
+ // be created with. This can be one of the generic values that map to a device
+ // such as hostonly, nat, or bridged. If the network is not one of these
+ // values, then it is assumed to be a VMware network device. (VMnet0..x)
+ Network string `mapstructure:"network" required:"false"`
// This is the ethernet adapter type the the
- // virtual machine will be created with. By default the e1000 network adapter
- // type will be used by Packer. For more information, please consult the
- //
- // Choosing a network adapter for your virtual machine for desktop VMware
- // clients. For ESXi, refer to the proper ESXi documentation.
+ // virtual machine will be created with. By default the e1000 network adapter
+ // type will be used by Packer. For more information, please consult the
+ //
+ // Choosing a network adapter for your virtual machine for desktop VMware
+ // clients. For ESXi, refer to the proper ESXi documentation.
NetworkAdapterType string `mapstructure:"network_adapter_type" required:"false"`
// Specify whether to enable VMware's virtual soundcard
- // device when building the VM. Defaults to false.
+ // device when building the VM. Defaults to false.
Sound bool `mapstructure:"sound" required:"false"`
// Enable VMware's USB bus when building the guest VM.
- // Defaults to false. To enable usage of the XHCI bus for USB 3 (5 Gbit/s),
- // one can use the vmx_data option to enable it by specifying true for
- // the usb_xhci.present property.
- USB bool `mapstructure:"usb" required:"false"`
+ // Defaults to false. To enable usage of the XHCI bus for USB 3 (5 Gbit/s),
+ // one can use the vmx_data option to enable it by specifying true for
+ // the usb_xhci.present property.
+ USB bool `mapstructure:"usb" required:"false"`
// This specifies a serial port to add to the VM.
- // It has a format of Type:option1,option2,.... The field Type can be one
- // of the following values: FILE, DEVICE, PIPE, AUTO, or NONE.
- Serial string `mapstructure:"serial" required:"false"`
+ // It has a format of Type:option1,option2,.... The field Type can be one
+ // of the following values: FILE, DEVICE, PIPE, AUTO, or NONE.
+ Serial string `mapstructure:"serial" required:"false"`
// This specifies a parallel port to add to the VM. It
- // has the format of Type:option1,option2,.... Type can be one of the
- // following values: FILE, DEVICE, AUTO, or NONE.
+ // has the format of Type:option1,option2,.... Type can be one of the
+ // following values: FILE, DEVICE, AUTO, or NONE.
Parallel string `mapstructure:"parallel" required:"false"`
}
diff --git a/builder/vmware/common/output_config.go b/builder/vmware/common/output_config.go
index 6fa30ebbc..e99919dcb 100644
--- a/builder/vmware/common/output_config.go
+++ b/builder/vmware/common/output_config.go
@@ -11,11 +11,11 @@ import (
type OutputConfig struct {
// This is the path to the directory where the
- // resulting virtual machine will be created. This may be relative or absolute.
- // If relative, the path is relative to the working directory when packer
- // is executed. This directory must not exist or be empty prior to running
- // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
- // name of the build.
+ // resulting virtual machine will be created. This may be relative or absolute.
+ // If relative, the path is relative to the working directory when packer
+ // is executed. This directory must not exist or be empty prior to running
+ // the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ // name of the build.
OutputDir string `mapstructure:"output_directory" required:"false"`
}
diff --git a/builder/vmware/common/run_config.go b/builder/vmware/common/run_config.go
index 214219525..e79bf20ea 100644
--- a/builder/vmware/common/run_config.go
+++ b/builder/vmware/common/run_config.go
@@ -10,28 +10,28 @@ import (
type RunConfig struct {
// Packer defaults to building VMware virtual machines
- // by launching a GUI that shows the console of the machine being built. When
- // this value is set to true, the machine will start without a console. For
- // VMware machines, Packer will output VNC connection information in case you
- // need to connect to the console to debug the build process.
+ // by launching a GUI that shows the console of the machine being built. When
+ // this value is set to true, the machine will start without a console. For
+ // VMware machines, Packer will output VNC connection information in case you
+ // need to connect to the console to debug the build process.
Headless bool `mapstructure:"headless" required:"false"`
// The IP address that should be
- // binded to for VNC. By default packer will use 127.0.0.1 for this. If you
- // wish to bind to all interfaces use 0.0.0.0.
- VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
+ // bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+ // wish to bind to all interfaces use 0.0.0.0.
+ VNCBindAddress string `mapstructure:"vnc_bind_address" required:"false"`
// The minimum and maximum port
- // to use for VNC access to the virtual machine. The builder uses VNC to type
- // the initial boot_command. Because Packer generally runs in parallel,
- // Packer uses a randomly chosen port in this range that appears available. By
- // default this is 5900 to 6000. The minimum and maximum ports are
- // inclusive.
- VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
- VNCPortMax int `mapstructure:"vnc_port_max"`
+ // to use for VNC access to the virtual machine. The builder uses VNC to type
+ // the initial boot_command. Because Packer generally runs in parallel,
+ // Packer uses a randomly chosen port in this range that appears available. By
+ // default this is 5900 to 6000. The minimum and maximum ports are
+ // inclusive.
+ VNCPortMin int `mapstructure:"vnc_port_min" required:"false"`
+ VNCPortMax int `mapstructure:"vnc_port_max"`
// Don't auto-generate a VNC password that
- // is used to secure the VNC communication with the VM. This must be set to
- // true if building on ESXi 6.5 and 6.7 with VNC enabled. Defaults to
- // false.
- VNCDisablePassword bool `mapstructure:"vnc_disable_password" required:"false"`
+ // is used to secure the VNC communication with the VM. This must be set to
+ // true if building on ESXi 6.5 and 6.7 with VNC enabled. Defaults to
+ // false.
+ VNCDisablePassword bool `mapstructure:"vnc_disable_password" required:"false"`
}
func (c *RunConfig) Prepare(ctx *interpolate.Context) (errs []error) {
diff --git a/builder/vmware/common/shutdown_config.go b/builder/vmware/common/shutdown_config.go
index 96f7f7d8d..fd5d665fe 100644
--- a/builder/vmware/common/shutdown_config.go
+++ b/builder/vmware/common/shutdown_config.go
@@ -11,13 +11,13 @@ import (
type ShutdownConfig struct {
// The command to use to gracefully shut down the
- // machine once all the provisioning is done. By default this is an empty
- // string, which tells Packer to just forcefully shut down the machine.
- ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
+ // machine once all the provisioning is done. By default this is an empty
+ // string, which tells Packer to just forcefully shut down the machine.
+ ShutdownCommand string `mapstructure:"shutdown_command" required:"false"`
// The amount of time to wait after executing the
- // shutdown_command for the virtual machine to actually shut down. If it
- // doesn't shut down in this time, it is an error. By default, the timeout is
- // 5m or five minutes.
+ // shutdown_command for the virtual machine to actually shut down. If it
+ // doesn't shut down in this time, it is an error. By default, the timeout is
+ // 5m or five minutes.
RawShutdownTimeout string `mapstructure:"shutdown_timeout" required:"false"`
ShutdownTimeout time.Duration ``
diff --git a/builder/vmware/common/tools_config.go b/builder/vmware/common/tools_config.go
index b4dd01c91..b9cb0e1be 100644
--- a/builder/vmware/common/tools_config.go
+++ b/builder/vmware/common/tools_config.go
@@ -8,17 +8,17 @@ import (
type ToolsConfig struct {
// The flavor of the VMware Tools ISO to
- // upload into the VM. Valid values are darwin, linux, and windows. By
- // default, this is empty, which means VMware tools won't be uploaded.
+ // upload into the VM. Valid values are darwin, linux, and windows. By
+ // default, this is empty, which means VMware tools won't be uploaded.
ToolsUploadFlavor string `mapstructure:"tools_upload_flavor" required:"false"`
// The path in the VM to upload the
- // VMware tools. This only takes effect if tools_upload_flavor is non-empty.
- // This is a configuration
- // template that has a single
- // valid variable: Flavor, which will be the value of tools_upload_flavor.
- // By default the upload path is set to {{.Flavor}}.iso. This setting is not
- // used when remote_type is esx5.
- ToolsUploadPath string `mapstructure:"tools_upload_path" required:"false"`
+ // VMware tools. This only takes effect if tools_upload_flavor is non-empty.
+ // This is a configuration
+ // template that has a single
+ // valid variable: Flavor, which will be the value of tools_upload_flavor.
+ // By default the upload path is set to {{.Flavor}}.iso. This setting is not
+ // used when remote_type is esx5.
+ ToolsUploadPath string `mapstructure:"tools_upload_path" required:"false"`
}
func (c *ToolsConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/common/vmx_config.go b/builder/vmware/common/vmx_config.go
index 83d98dfc2..b7ee6a0e7 100644
--- a/builder/vmware/common/vmx_config.go
+++ b/builder/vmware/common/vmx_config.go
@@ -8,26 +8,26 @@ import (
type VMXConfig struct {
// Arbitrary key/values to enter
- // into the virtual machine VMX file. This is for advanced users who want to
- // set properties that aren't yet supported by the builder.
- VMXData map[string]string `mapstructure:"vmx_data" required:"false"`
+ // into the virtual machine VMX file. This is for advanced users who want to
+ // set properties that aren't yet supported by the builder.
+ VMXData map[string]string `mapstructure:"vmx_data" required:"false"`
// Identical to vmx_data,
- // except that it is run after the virtual machine is shutdown, and before the
- // virtual machine is exported.
- VMXDataPost map[string]string `mapstructure:"vmx_data_post" required:"false"`
+ // except that it is run after the virtual machine is shutdown, and before the
+ // virtual machine is exported.
+ VMXDataPost map[string]string `mapstructure:"vmx_data_post" required:"false"`
// Remove all ethernet interfaces
- // from the VMX file after building. This is for advanced users who understand
- // the ramifications, but is useful for building Vagrant boxes since Vagrant
- // will create ethernet interfaces when provisioning a box. Defaults to
- // false.
- VMXRemoveEthernet bool `mapstructure:"vmx_remove_ethernet_interfaces" required:"false"`
+ // from the VMX file after building. This is for advanced users who understand
+ // the ramifications, but is useful for building Vagrant boxes since Vagrant
+ // will create ethernet interfaces when provisioning a box. Defaults to
+ // false.
+ VMXRemoveEthernet bool `mapstructure:"vmx_remove_ethernet_interfaces" required:"false"`
// The name that will appear in your vSphere client,
- // and will be used for the vmx basename. This will override the "displayname"
- // value in your vmx file. It will also override the "displayname" if you have
- // set it in the "vmx_data" Packer option. This option is useful if you are
- // chaining vmx builds and want to make sure that the display name of each step
- // in the chain is unique.
- VMXDisplayName string `mapstructure:"display_name" required:"false"`
+ // and will be used for the vmx basename. This will override the "displayname"
+ // value in your vmx file. It will also override the "displayname" if you have
+ // set it in the "vmx_data" Packer option. This option is useful if you are
+ // chaining vmx builds and want to make sure that the display name of each step
+ // in the chain is unique.
+ VMXDisplayName string `mapstructure:"display_name" required:"false"`
}
func (c *VMXConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/builder/vmware/iso/config.go b/builder/vmware/iso/config.go
index c9434d31a..f11521e26 100644
--- a/builder/vmware/iso/config.go
+++ b/builder/vmware/iso/config.go
@@ -32,69 +32,69 @@ type Config struct {
vmwcommon.VMXConfig `mapstructure:",squash"`
vmwcommon.ExportConfig `mapstructure:",squash"`
// The size(s) of any additional
- // hard disks for the VM in megabytes. If this is not specified then the VM
- // will only contain a primary hard disk. The builder uses expandable, not
- // fixed-size virtual hard disks, so the actual file representing the disk will
- // not use the full size unless it is full.
+ // hard disks for the VM in megabytes. If this is not specified then the VM
+ // will only contain a primary hard disk. The builder uses expandable, not
+ // fixed-size virtual hard disks, so the actual file representing the disk will
+ // not use the full size unless it is full.
AdditionalDiskSize []uint `mapstructure:"disk_additional_size" required:"false"`
// The adapter type of the VMware virtual disk
- // to create. This option is for advanced usage, modify only if you know what
- // you're doing. Some of the options you can specify are ide, sata, nvme
- // or scsi (which uses the "lsilogic" scsi interface by default). If you
- // specify another option, Packer will assume that you're specifying a scsi
- // interface of that specified type. For more information, please consult the
- //
- // Virtual Disk Manager User's Guide for desktop VMware clients.
- // For ESXi, refer to the proper ESXi documentation.
- DiskAdapterType string `mapstructure:"disk_adapter_type" required:"false"`
+ // to create. This option is for advanced usage, modify only if you know what
+ // you're doing. Some of the options you can specify are ide, sata, nvme
+ // or scsi (which uses the "lsilogic" scsi interface by default). If you
+ // specify another option, Packer will assume that you're specifying a scsi
+ // interface of that specified type. For more information, please consult the
+ //
+ // Virtual Disk Manager User's Guide for desktop VMware clients.
+ // For ESXi, refer to the proper ESXi documentation.
+ DiskAdapterType string `mapstructure:"disk_adapter_type" required:"false"`
// The filename of the virtual disk that'll be created,
- // without the extension. This defaults to packer.
- DiskName string `mapstructure:"vmdk_name" required:"false"`
+ // without the extension. This defaults to packer.
+ DiskName string `mapstructure:"vmdk_name" required:"false"`
// The size of the hard disk for the VM in megabytes.
- // The builder uses expandable, not fixed-size virtual hard disks, so the
- // actual file representing the disk will not use the full size unless it
- // is full. By default this is set to 40000 (about 40 GB).
- DiskSize uint `mapstructure:"disk_size" required:"false"`
+ // The builder uses expandable, not fixed-size virtual hard disks, so the
+ // actual file representing the disk will not use the full size unless it
+ // is full. By default this is set to 40000 (about 40 GB).
+ DiskSize uint `mapstructure:"disk_size" required:"false"`
// The type of VMware virtual disk to create. This
- // option is for advanced usage.
- DiskTypeId string `mapstructure:"disk_type_id" required:"false"`
+ // option is for advanced usage.
+ DiskTypeId string `mapstructure:"disk_type_id" required:"false"`
// Either "ovf", "ova" or "vmx", this specifies the output
- // format of the exported virtual machine. This defaults to "ovf".
- // Before using this option, you need to install ovftool. This option
- // currently only works when option remote_type is set to "esx5".
- // Since ovftool is only capable of password based authentication
- // remote_password must be set when exporting the VM.
- Format string `mapstructure:"format" required:"false"`
+ // format of the exported virtual machine. This defaults to "ovf".
+ // Before using this option, you need to install ovftool. This option
+ // currently only works when option remote_type is set to "esx5".
+ // Since ovftool is only capable of password based authentication
+ // remote_password must be set when exporting the VM.
+ Format string `mapstructure:"format" required:"false"`
// The adapter type (or bus) that will be used
- // by the cdrom device. This is chosen by default based on the disk adapter
- // type. VMware tends to lean towards ide for the cdrom device unless
- // sata is chosen for the disk adapter and so Packer attempts to mirror
- // this logic. This field can be specified as either ide, sata, or scsi.
+ // by the cdrom device. This is chosen by default based on the disk adapter
+ // type. VMware tends to lean towards ide for the cdrom device unless
+ // sata is chosen for the disk adapter and so Packer attempts to mirror
+ // this logic. This field can be specified as either ide, sata, or scsi.
CdromAdapterType string `mapstructure:"cdrom_adapter_type" required:"false"`
// The guest OS type being installed. This will be
- // set in the VMware VMX. By default this is other. By specifying a more
- // specific OS type, VMware may perform some optimizations or virtual hardware
- // changes to better support the operating system running in the
- // virtual machine.
+ // set in the VMware VMX. By default this is other. By specifying a more
+ // specific OS type, VMware may perform some optimizations or virtual hardware
+ // changes to better support the operating system running in the
+ // virtual machine.
GuestOSType string `mapstructure:"guest_os_type" required:"false"`
// The vmx hardware
- // version
- // for the new virtual machine. Only the default value has been tested, any
- // other value is experimental. Default value is 9.
- Version string `mapstructure:"version" required:"false"`
+ // version
+ // for the new virtual machine. Only the default value has been tested, any
+ // other value is experimental. Default value is 9.
+ Version string `mapstructure:"version" required:"false"`
// This is the name of the VMX file for the new virtual
- // machine, without the file extension. By default this is packer-BUILDNAME,
- // where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"`
// Path to a configuration
- // template that defines the
- // contents of the virtual machine VMX file for VMware. This is for advanced
- // users only as this can render the virtual machine non-functional. See
- // below for more information. For basic VMX modifications, try
- // vmx_data first.
- VMXTemplatePath string `mapstructure:"vmx_template_path" required:"false"`
+ // template that defines the
+ // contents of the virtual machine VMX file for VMware. This is for advanced
+ // users only as this can render the virtual machine non-functional. See
+ // below for more information. For basic VMX modifications, try
+ // vmx_data first.
+ VMXTemplatePath string `mapstructure:"vmx_template_path" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go
index bb579e43d..17c7bb3f4 100644
--- a/builder/vmware/vmx/config.go
+++ b/builder/vmware/vmx/config.go
@@ -29,21 +29,21 @@ type Config struct {
vmwcommon.VMXConfig `mapstructure:",squash"`
vmwcommon.ExportConfig `mapstructure:",squash"`
// By default Packer creates a 'full' clone of
- // the virtual machine specified in source_path. The resultant virtual
- // machine is fully independant from the parent it was cloned from.
- Linked bool `mapstructure:"linked" required:"false"`
+ // the virtual machine specified in source_path. The resultant virtual
+ // machine is fully independant from the parent it was cloned from.
+ Linked bool `mapstructure:"linked" required:"false"`
// The type of remote machine that will be used to
- // build this VM rather than a local desktop product. The only value accepted
- // for this currently is esx5. If this is not set, a desktop product will
- // be used. By default, this is not set.
+ // build this VM rather than a local desktop product. The only value accepted
+ // for this currently is esx5. If this is not set, a desktop product will
+ // be used. By default, this is not set.
RemoteType string `mapstructure:"remote_type" required:"false"`
// Path to the source VMX file to clone. If
- // remote_type is enabled then this specifies a path on the remote_host.
+ // remote_type is enabled then this specifies a path on the remote_host.
SourcePath string `mapstructure:"source_path" required:"true"`
// This is the name of the VMX file for the new virtual
- // machine, without the file extension. By default this is packer-BUILDNAME,
- // where "BUILDNAME" is the name of the build.
- VMName string `mapstructure:"vm_name" required:"false"`
+ // machine, without the file extension. By default this is packer-BUILDNAME,
+ // where "BUILDNAME" is the name of the build.
+ VMName string `mapstructure:"vm_name" required:"false"`
ctx interpolate.Context
}
diff --git a/builder/yandex/config.go b/builder/yandex/config.go
index bab294f62..0609b5555 100644
--- a/builder/yandex/config.go
+++ b/builder/yandex/config.go
@@ -28,81 +28,81 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Communicator communicator.Config `mapstructure:",squash"`
// Non standard api endpoint URL.
- Endpoint string `mapstructure:"endpoint" required:"false"`
+ Endpoint string `mapstructure:"endpoint" required:"false"`
// The folder ID that will be used to launch instances and store images.
- // Alternatively you may set value by environment variable YC_FOLDER_ID.
- FolderID string `mapstructure:"folder_id" required:"true"`
- // Path to file with Service Account key in json format. This
- // is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
- // YC_SERVICE_ACCOUNT_KEY_FILE.
+ // Alternatively you may set value by environment variable YC_FOLDER_ID.
+ FolderID string `mapstructure:"folder_id" required:"true"`
+ // Path to file with Service Account key in json format. This
+ // is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
+ // YC_SERVICE_ACCOUNT_KEY_FILE.
ServiceAccountKeyFile string `mapstructure:"service_account_key_file" required:"false"`
// OAuth token to use to authenticate to Yandex.Cloud. Alternatively you may set
- // value by environment variable YC_TOKEN.
- Token string `mapstructure:"token" required:"true"`
+ // value by environment variable YC_TOKEN.
+ Token string `mapstructure:"token" required:"true"`
// The name of the disk, if unset the instance name
- // will be used.
- DiskName string `mapstructure:"disk_name" required:"false"`
+ // will be used.
+ DiskName string `mapstructure:"disk_name" required:"false"`
// The size of the disk in GB. This defaults to 10, which is 10GB.
- DiskSizeGb int `mapstructure:"disk_size_gb" required:"false"`
+ DiskSizeGb int `mapstructure:"disk_size_gb" required:"false"`
// Specify disk type for the launched instance. Defaults to network-hdd.
- DiskType string `mapstructure:"disk_type" required:"false"`
+ DiskType string `mapstructure:"disk_type" required:"false"`
// The description of the resulting image.
- ImageDescription string `mapstructure:"image_description" required:"false"`
+ ImageDescription string `mapstructure:"image_description" required:"false"`
// The family name of the resulting image.
- ImageFamily string `mapstructure:"image_family" required:"false"`
+ ImageFamily string `mapstructure:"image_family" required:"false"`
// Key/value pair labels to
- // apply to the created image.
- ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
+ // apply to the created image.
+ ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
// The unique name of the resulting image. Defaults to
- // packer-{{timestamp}}.
- ImageName string `mapstructure:"image_name" required:"false"`
+ // packer-{{timestamp}}.
+ ImageName string `mapstructure:"image_name" required:"false"`
// License IDs that indicate which licenses are attached to resulting image.
- ImageProductIDs []string `mapstructure:"image_product_ids" required:"false"`
+ ImageProductIDs []string `mapstructure:"image_product_ids" required:"false"`
// The number of cores available to the instance.
- InstanceCores int `mapstructure:"instance_cores" required:"false"`
+ InstanceCores int `mapstructure:"instance_cores" required:"false"`
// The amount of memory available to the instance, specified in gigabytes.
- InstanceMemory int `mapstructure:"instance_mem_gb" required:"false"`
+ InstanceMemory int `mapstructure:"instance_mem_gb" required:"false"`
// The name assigned to the instance.
- InstanceName string `mapstructure:"instance_name" required:"false"`
+ InstanceName string `mapstructure:"instance_name" required:"false"`
// Key/value pair labels to apply to
- // the launched instance.
- Labels map[string]string `mapstructure:"labels" required:"false"`
+ // the launched instance.
+ Labels map[string]string `mapstructure:"labels" required:"false"`
// Identifier of the hardware platform configuration for the instance. This defaults to standard-v1.
- PlatformID string `mapstructure:"platform_id" required:"false"`
+ PlatformID string `mapstructure:"platform_id" required:"false"`
// Metadata applied to the launched
- // instance.
- Metadata map[string]string `mapstructure:"metadata" required:"false"`
+ // instance.
+ Metadata map[string]string `mapstructure:"metadata" required:"false"`
// File path to save serial port output of the launched instance.
- SerialLogFile string `mapstructure:"serial_log_file" required:"false"`
+ SerialLogFile string `mapstructure:"serial_log_file" required:"false"`
// The source image family to create the new image
- // from. You can also specify source_image_id instead. Just one of a source_image_id or
- // source_image_family must be specified. Example: ubuntu-1804-lts
- SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
+ // from. You can also specify source_image_id instead. Just one of a source_image_id or
+ // source_image_family must be specified. Example: ubuntu-1804-lts
+ SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
// The ID of the folder containing the source image.
- SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
+ SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
// The source image ID to use to create the new image
- // from.
- SourceImageID string `mapstructure:"source_image_id" required:"false"`
- // The Yandex VPC subnet id to use for
- // the launched instance. Note, the zone of the subnet must match the
- // zone in which the VM is launched.
- SubnetID string `mapstructure:"subnet_id" required:"false"`
- // If set to true, then launched instance will have external internet
- // access.
- UseIPv4Nat bool `mapstructure:"use_ipv4_nat" required:"false"`
+ // from.
+ SourceImageID string `mapstructure:"source_image_id" required:"false"`
+ // The Yandex VPC subnet id to use for
+ // the launched instance. Note, the zone of the subnet must match the
+ // zone in which the VM is launched.
+ SubnetID string `mapstructure:"subnet_id" required:"false"`
+ // If set to true, then launched instance will have external internet
+ // access.
+ UseIPv4Nat bool `mapstructure:"use_ipv4_nat" required:"false"`
// Set to true to enable IPv6 for the instance being
- // created. This defaults to false, or not enabled.
- // -> Note: ~> Usage of IPv6 will be available in the future.
- UseIPv6 bool `mapstructure:"use_ipv6" required:"false"`
+ // created. This defaults to false, or not enabled.
+ // -> Note: ~> Usage of IPv6 will be available in the future.
+ UseIPv6 bool `mapstructure:"use_ipv6" required:"false"`
// If true, use the instance's internal IP address
- // instead of its external IP during building.
- UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
+ // instead of its external IP during building.
+ UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
// The name of the zone to launch the instance. This defaults to ru-central1-a.
- Zone string `mapstructure:"zone" required:"false"`
+ Zone string `mapstructure:"zone" required:"false"`
- ctx interpolate.Context
+ ctx interpolate.Context
// The time to wait for instance state changes.
- // Defaults to 5m.
+ // Defaults to 5m.
StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`
}
From 4bf6a56d71fc968b58732ad7c9f9e416d936f7a7 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:44:48 +0200
Subject: [PATCH 25/97] split communicator.SSH & communicator.WinRM to
facilitate documentation
put doc for communicator into the struct
---
builder/alicloud/ecs/run_config_test.go | 4 +-
builder/amazon/common/run_config_test.go | 4 +-
.../common/step_run_spot_instance_test.go | 4 +-
builder/openstack/run_config_test.go | 4 +-
builder/parallels/common/ssh_config_test.go | 4 +-
builder/tencentcloud/cvm/run_config_test.go | 4 +-
builder/virtualbox/common/ssh_config_test.go | 4 +-
builder/vmware/common/ssh_config_test.go | 4 +-
helper/communicator/config.go | 208 ++++++++++++++----
helper/communicator/config_test.go | 50 +++--
10 files changed, 224 insertions(+), 66 deletions(-)
diff --git a/builder/alicloud/ecs/run_config_test.go b/builder/alicloud/ecs/run_config_test.go
index 46d9b11de..f0cb6d67b 100644
--- a/builder/alicloud/ecs/run_config_test.go
+++ b/builder/alicloud/ecs/run_config_test.go
@@ -13,7 +13,9 @@ func testConfig() *RunConfig {
AlicloudSourceImage: "alicloud_images",
InstanceType: "ecs.n1.tiny",
Comm: communicator.Config{
- SSHUsername: "alicloud",
+ SSH: communicator.SSH{
+ SSHUsername: "alicloud",
+ },
},
}
}
diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go
index 9228dbcba..2ea0406b0 100644
--- a/builder/amazon/common/run_config_test.go
+++ b/builder/amazon/common/run_config_test.go
@@ -24,7 +24,9 @@ func testConfig() *RunConfig {
InstanceType: "m1.small",
Comm: communicator.Config{
- SSHUsername: "foo",
+ SSH: communicator.SSH{
+ SSHUsername: "foo",
+ },
},
}
}
diff --git a/builder/amazon/common/step_run_spot_instance_test.go b/builder/amazon/common/step_run_spot_instance_test.go
index 0139e0b5a..b5a37292e 100644
--- a/builder/amazon/common/step_run_spot_instance_test.go
+++ b/builder/amazon/common/step_run_spot_instance_test.go
@@ -93,7 +93,9 @@ func getBasicStep() *StepRunSpotInstance {
BlockDurationMinutes: 0,
Debug: false,
Comm: &communicator.Config{
- SSHKeyPairName: "foo",
+ SSH: communicator.SSH{
+ SSHKeyPairName: "foo",
+ },
},
EbsOptimized: false,
ExpectedRootDevice: "ebs",
diff --git a/builder/openstack/run_config_test.go b/builder/openstack/run_config_test.go
index fd535496f..bf08f209c 100644
--- a/builder/openstack/run_config_test.go
+++ b/builder/openstack/run_config_test.go
@@ -23,7 +23,9 @@ func testRunConfig() *RunConfig {
Flavor: "m1.small",
Comm: communicator.Config{
- SSHUsername: "foo",
+ SSH: communicator.SSH{
+ SSHUsername: "foo",
+ },
},
}
}
diff --git a/builder/parallels/common/ssh_config_test.go b/builder/parallels/common/ssh_config_test.go
index 7a96920bc..7e0b2ad36 100644
--- a/builder/parallels/common/ssh_config_test.go
+++ b/builder/parallels/common/ssh_config_test.go
@@ -11,7 +11,9 @@ import (
func testSSHConfig() *SSHConfig {
return &SSHConfig{
Comm: communicator.Config{
- SSHUsername: "foo",
+ SSH: communicator.SSH{
+ SSHUsername: "foo",
+ },
},
}
}
diff --git a/builder/tencentcloud/cvm/run_config_test.go b/builder/tencentcloud/cvm/run_config_test.go
index 9f2dc5b35..d6ffb889b 100644
--- a/builder/tencentcloud/cvm/run_config_test.go
+++ b/builder/tencentcloud/cvm/run_config_test.go
@@ -13,7 +13,9 @@ func testConfig() *TencentCloudRunConfig {
SourceImageId: "img-qwer1234",
InstanceType: "S3.SMALL2",
Comm: communicator.Config{
- SSHUsername: "tencentcloud",
+ SSH: communicator.SSH{
+ SSHUsername: "tencentcloud",
+ },
},
}
}
diff --git a/builder/virtualbox/common/ssh_config_test.go b/builder/virtualbox/common/ssh_config_test.go
index 1c89c8770..9231685bb 100644
--- a/builder/virtualbox/common/ssh_config_test.go
+++ b/builder/virtualbox/common/ssh_config_test.go
@@ -11,7 +11,9 @@ import (
func testSSHConfig() *SSHConfig {
return &SSHConfig{
Comm: communicator.Config{
- SSHUsername: "foo",
+ SSH: communicator.SSH{
+ SSHUsername: "foo",
+ },
},
}
}
diff --git a/builder/vmware/common/ssh_config_test.go b/builder/vmware/common/ssh_config_test.go
index 7a96920bc..7e0b2ad36 100644
--- a/builder/vmware/common/ssh_config_test.go
+++ b/builder/vmware/common/ssh_config_test.go
@@ -11,7 +11,9 @@ import (
func testSSHConfig() *SSHConfig {
return &SSHConfig{
Comm: communicator.Config{
- SSHUsername: "foo",
+ SSH: communicator.SSH{
+ SSHUsername: "foo",
+ },
},
}
}
diff --git a/helper/communicator/config.go b/helper/communicator/config.go
index 5488beccd..9d7486780 100644
--- a/helper/communicator/config.go
+++ b/helper/communicator/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package communicator
import (
@@ -21,54 +23,180 @@ import (
// Config is the common configuration that communicators allow within
// a builder.
type Config struct {
+ // Packer currently supports three kinds of communicators:
+ //
+ // - `none` - No communicator will be used. If this is set, most
+ // provisioners also can't be used.
+ //
+ // - `ssh` - An SSH connection will be established to the machine. This
+ // is usually the default.
+ //
+ // - `winrm` - A WinRM connection will be established.
+ //
+ // In addition to the above, some builders have custom communicators they
+ // can use. For example, the Docker builder has a "docker" communicator
+ // that uses `docker exec` and `docker cp` to execute scripts and copy
+ // files.
Type string `mapstructure:"communicator"`
+ // We recommend that you enable SSH or WinRM as the very last step in your
+ // guest's bootstrap script, but sometimes you may have a race condition where
+ // you need Packer to wait before attempting to connect to your guest.
+ //
+ // If you end up in this situation, you can use the template option
+ // `pause_before_connecting`. By default, there is no pause. For example:
+ //
+ // ```json
+ // {
+ // "communicator": "ssh",
+ // "ssh_username": "myuser",
+ // "pause_before_connecting": "10m"
+ // }
+ // ```
+ //
+ // In this example, Packer will check whether it can connect, as normal. But once
+ // a connection attempt is successful, it will disconnect and then wait 10 minutes
+ // before connecting to the guest and beginning provisioning.
+ PauseBeforeConnect time.Duration `mapstructure:"pause_before_connecting"`
+
+ SSH `mapstructure:",squash"`
+ WinRM `mapstructure:",squash"`
+}
+
+type SSH struct {
// SSH
- SSHHost string `mapstructure:"ssh_host"`
- SSHPort int `mapstructure:"ssh_port"`
- SSHUsername string `mapstructure:"ssh_username"`
- SSHPassword string `mapstructure:"ssh_password"`
- SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
- SSHTemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
- SSHClearAuthorizedKeys bool `mapstructure:"ssh_clear_authorized_keys"`
- SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"`
- SSHInterface string `mapstructure:"ssh_interface"`
- SSHIPVersion string `mapstructure:"ssh_ip_version"`
- SSHPty bool `mapstructure:"ssh_pty"`
- SSHTimeout time.Duration `mapstructure:"ssh_timeout"`
- SSHAgentAuth bool `mapstructure:"ssh_agent_auth"`
- SSHDisableAgentForwarding bool `mapstructure:"ssh_disable_agent_forwarding"`
- SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"`
- SSHBastionHost string `mapstructure:"ssh_bastion_host"`
- SSHBastionPort int `mapstructure:"ssh_bastion_port"`
- SSHBastionAgentAuth bool `mapstructure:"ssh_bastion_agent_auth"`
- SSHBastionUsername string `mapstructure:"ssh_bastion_username"`
- SSHBastionPassword string `mapstructure:"ssh_bastion_password"`
- SSHBastionPrivateKeyFile string `mapstructure:"ssh_bastion_private_key_file"`
- SSHFileTransferMethod string `mapstructure:"ssh_file_transfer_method"`
- SSHProxyHost string `mapstructure:"ssh_proxy_host"`
- SSHProxyPort int `mapstructure:"ssh_proxy_port"`
- SSHProxyUsername string `mapstructure:"ssh_proxy_username"`
- SSHProxyPassword string `mapstructure:"ssh_proxy_password"`
- SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"`
- SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"`
+
+ // The address to SSH to. This usually is automatically configured by the
+ // builder.
+ SSHHost string `mapstructure:"ssh_host"`
+ // The port to connect to SSH. This defaults to `22`.
+ SSHPort int `mapstructure:"ssh_port"`
+ // The username to connect to SSH with. Required if using SSH.
+ SSHUsername string `mapstructure:"ssh_username"`
+ // A plaintext password to use to authenticate with SSH.
+ SSHPassword string `mapstructure:"ssh_password"`
+ // If specified, this is the key that will be used for SSH with the
+ // machine. The key must match a key pair name loaded up into Amazon EC2.
+ // By default, this is blank, and Packer will generate a temporary keypair
+ // unless [`ssh_password`](../templates/communicator.html#ssh_password) is
+ // used.
+ // [`ssh_private_key_file`](../templates/communicator.html#ssh_private_key_file)
+ // or `ssh_agent_auth` must be specified when `ssh_keypair_name` is
+ // utilized.
+ SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
+ SSHTemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
+ // If true, Packer will attempt to remove its temporary key from
+ // `~/.ssh/authorized_keys` and `/root/.ssh/authorized_keys`. This is a
+ // mostly cosmetic option, since Packer will delete the temporary private
+ // key from the host system regardless of whether this is set to true
+ // (unless the user has set the `-debug` flag). Defaults to "false";
+ // currently only works on guests with `sed` installed.
+ SSHClearAuthorizedKeys bool `mapstructure:"ssh_clear_authorized_keys"`
+ // Path to a PEM encoded private key file to use to authenticate with SSH.
+ // The `~` can be used in path and will be expanded to the home directory
+ // of current user.
+ SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"`
+ // One of `public_ip`, `private_ip`, `public_dns`, or `private_dns`. If
+ // set, either the public IP address, private IP address, public DNS name
+ // or private DNS name will used as the host for SSH. The default behaviour
+ // if inside a VPC is to use the public IP address if available, otherwise
+ // the private IP address will be used. If not in a VPC the public DNS name
+ // will be used. Also works for WinRM.
+ //
+ // Where Packer is configured for an outbound proxy but WinRM traffic
+ // should be direct, `ssh_interface` must be set to `private_dns` and
+ // `.compute.internal` included in the `NO_PROXY` environment
+ // variable.
+ SSHInterface string `mapstructure:"ssh_interface"`
+ SSHIPVersion string `mapstructure:"ssh_ip_version"`
+ // If `true`, a PTY will be requested for the SSH connection. This defaults
+ // to `false`.
+ SSHPty bool `mapstructure:"ssh_pty"`
+ // The time to wait for SSH to become available. Packer uses this to
+ // determine when the machine has booted so this is usually quite long.
+ // Example value: `10m`.
+ SSHTimeout time.Duration `mapstructure:"ssh_timeout"`
+ // If true, the local SSH agent will be used to authenticate connections to
+ // the source instance. No temporary keypair will be created, and the
+ // values of `ssh_password` and `ssh_private_key_file` will be ignored. To
+ // use this option with a key pair already configured in the source AMI,
+ // leave the `ssh_keypair_name` blank. To associate an existing key pair in
+ // AWS with the source instance, set the `ssh_keypair_name` field to the
+ // name of the key pair.
+ SSHAgentAuth bool `mapstructure:"ssh_agent_auth"`
+ // If true, SSH agent forwarding will be disabled. Defaults to `false`.
+ SSHDisableAgentForwarding bool `mapstructure:"ssh_disable_agent_forwarding"`
+ // The number of handshakes to attempt with SSH once it can connect. This
+ // defaults to `10`.
+ SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"`
+ // A bastion host to use for the actual SSH connection.
+ SSHBastionHost string `mapstructure:"ssh_bastion_host"`
+ // The port of the bastion host. Defaults to `22`.
+ SSHBastionPort int `mapstructure:"ssh_bastion_port"`
+ // If `true`, the local SSH agent will be used to authenticate with the
+ // bastion host. Defaults to `false`.
+ SSHBastionAgentAuth bool `mapstructure:"ssh_bastion_agent_auth"`
+ // The username to connect to the bastion host.
+ SSHBastionUsername string `mapstructure:"ssh_bastion_username"`
+ // The password to use to authenticate with the bastion host.
+ SSHBastionPassword string `mapstructure:"ssh_bastion_password"`
+ // Path to a PEM encoded private key file to use to authenticate with the
+ // bastion host. The `~` can be used in path and will be expanded to the
+ // home directory of current user.
+ SSHBastionPrivateKeyFile string `mapstructure:"ssh_bastion_private_key_file"`
+ // `scp` or `sftp` - How to transfer files, Secure copy (default) or SSH
+ // File Transfer Protocol.
+ SSHFileTransferMethod string `mapstructure:"ssh_file_transfer_method"`
+ // A SOCKS proxy host to use for SSH connection
+ SSHProxyHost string `mapstructure:"ssh_proxy_host"`
+ // A port of the SOCKS proxy. Defaults to `1080`.
+ SSHProxyPort int `mapstructure:"ssh_proxy_port"`
+ // The optional username to authenticate with the proxy server.
+ SSHProxyUsername string `mapstructure:"ssh_proxy_username"`
+ // The optional password to use to authenticate with the proxy server.
+ SSHProxyPassword string `mapstructure:"ssh_proxy_password"`
+ // How often to send "keep alive" messages to the server. Set to a negative
+ // value (`-1s`) to disable. Example value: `10s`. Defaults to `5s`.
+ SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"`
+ // The amount of time to wait for a remote command to end. This might be
+ // useful if, for example, packer hangs on a connection after a reboot.
+ // Example: `5m`. Disabled by default.
+ SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"`
+
// SSH Internals
SSHPublicKey []byte
SSHPrivateKey []byte
+}
- // WinRM
- WinRMUser string `mapstructure:"winrm_username"`
- WinRMPassword string `mapstructure:"winrm_password"`
- WinRMHost string `mapstructure:"winrm_host"`
- WinRMPort int `mapstructure:"winrm_port"`
- WinRMTimeout time.Duration `mapstructure:"winrm_timeout"`
- WinRMUseSSL bool `mapstructure:"winrm_use_ssl"`
- WinRMInsecure bool `mapstructure:"winrm_insecure"`
- WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"`
+type WinRM struct {
+ // The username to use to connect to WinRM.
+ WinRMUser string `mapstructure:"winrm_username"`
+ // The password to use to connect to WinRM.
+ WinRMPassword string `mapstructure:"winrm_password"`
+ // The address for WinRM to connect to.
+ //
+ // NOTE: If using an Amazon EBS builder, you can specify the interface
+ // WinRM connects to via
+ // [`ssh_interface`](https://www.packer.io/docs/builders/amazon-ebs.html#ssh_interface)
+ WinRMHost string `mapstructure:"winrm_host"`
+ // The WinRM port to connect to. This defaults to `5985` for plain
+ // unencrypted connection and `5986` for SSL when `winrm_use_ssl` is set to
+ // true.
+ WinRMPort int `mapstructure:"winrm_port"`
+ // The amount of time to wait for WinRM to become available. This defaults
+ // to `30m` since setting up a Windows machine generally takes a long time.
+ WinRMTimeout time.Duration `mapstructure:"winrm_timeout"`
+ // If `true`, use HTTPS for WinRM.
+ WinRMUseSSL bool `mapstructure:"winrm_use_ssl"`
+ // If `true`, do not check server certificate chain and host name.
+ WinRMInsecure bool `mapstructure:"winrm_insecure"`
+ // If `true`, NTLMv2 authentication (with session security) will be used
+ // for WinRM, rather than default (basic authentication), removing the
+ // requirement for basic authentication to be enabled within the target
+ // guest. Further reading for remote connection authentication can be found
+ // [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx).
+ WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"`
WinRMTransportDecorator func() winrm.Transporter
-
- // Delay
- PauseBeforeConnect time.Duration `mapstructure:"pause_before_connecting"`
}
// ReadSSHPrivateKeyFile returns the SSH private key bytes
diff --git a/helper/communicator/config_test.go b/helper/communicator/config_test.go
index c5af24114..688f56c55 100644
--- a/helper/communicator/config_test.go
+++ b/helper/communicator/config_test.go
@@ -10,7 +10,9 @@ import (
func testConfig() *Config {
return &Config{
- SSHUsername: "root",
+ SSH: SSH{
+ SSHUsername: "root",
+ },
}
}
@@ -41,8 +43,10 @@ func TestConfig_badtype(t *testing.T) {
func TestConfig_winrm_noport(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
@@ -56,9 +60,11 @@ func TestConfig_winrm_noport(t *testing.T) {
func TestConfig_winrm_noport_ssl(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
- WinRMUseSSL: true,
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ WinRMUseSSL: true,
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
@@ -72,9 +78,11 @@ func TestConfig_winrm_noport_ssl(t *testing.T) {
func TestConfig_winrm_port(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
- WinRMPort: 5509,
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ WinRMPort: 5509,
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
@@ -88,10 +96,12 @@ func TestConfig_winrm_port(t *testing.T) {
func TestConfig_winrm_port_ssl(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
- WinRMPort: 5510,
- WinRMUseSSL: true,
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ WinRMPort: 5510,
+ WinRMUseSSL: true,
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
@@ -105,9 +115,11 @@ func TestConfig_winrm_port_ssl(t *testing.T) {
func TestConfig_winrm_use_ntlm(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
- WinRMUseNTLM: true,
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ WinRMUseNTLM: true,
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
@@ -128,8 +140,10 @@ func TestConfig_winrm_use_ntlm(t *testing.T) {
func TestConfig_winrm(t *testing.T) {
c := &Config{
- Type: "winrm",
- WinRMUser: "admin",
+ Type: "winrm",
+ WinRM: WinRM{
+ WinRMUser: "admin",
+ },
}
if err := c.Prepare(testContext(t)); len(err) > 0 {
t.Fatalf("bad: %#v", err)
From 4ae10f08b2dca2291669f27cfa1d69fcb4013c70 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:29:47 +0200
Subject: [PATCH 26/97] docs: generate builders partials from struct comments
---
cmd/struct-markdown/main.go | 138 ++++++++++
cmd/struct-markdown/template.go | 33 +++
..._AlicloudAccessConfig-not-required.html.md | 9 +
.../_AlicloudAccessConfig-required.html.md | 13 +
.../_AlicloudDiskDevice-not-required.html.md | 39 +++
.../_AlicloudDiskDevices-not-required.html.md | 8 +
.../_AlicloudImageConfig-not-required.html.md | 53 ++++
.../ecs/_AlicloudImageConfig-required.html.md | 7 +
.../ecs/_RunConfig-not-required.html.md | 78 ++++++
.../alicloud/ecs/_RunConfig-required.html.md | 13 +
.../chroot/_Config-not-required.html.md | 130 +++++++++
.../amazon/chroot/_Config-required.html.md | 7 +
.../_AMIBlockDevices-not-required.html.md | 52 ++++
.../common/_AMIConfig-not-required.html.md | 97 +++++++
.../amazon/common/_AMIConfig-required.html.md | 7 +
.../common/_AccessConfig-not-required.html.md | 65 +++++
.../common/_AccessConfig-required.html.md | 15 ++
.../_AmiFilterOptions-not-required.html.md | 3 +
.../common/_BlockDevice-not-required.html.md | 50 ++++
.../_LaunchBlockDevices-not-required.html.md | 19 ++
.../common/_RunConfig-not-required.html.md | 246 ++++++++++++++++++
.../amazon/common/_RunConfig-required.html.md | 10 +
.../_SubnetFilterOptions-not-required.html.md | 4 +
...VaultAWSEngineOptions-not-required.html.md | 15 ++
.../amazon/ebs/_Config-not-required.html.md | 8 +
.../ebssurrogate/_Config-not-required.html.md | 11 +
.../ebssurrogate/_Config-required.html.md | 10 +
.../_RootBlockDevice-not-required.html.md | 26 ++
.../_BlockDevice-not-required.html.md | 6 +
.../ebsvolume/_Config-not-required.html.md | 19 ++
.../instance/_Config-not-required.html.md | 24 ++
.../amazon/instance/_Config-required.html.md | 18 ++
.../arm/_ClientConfig-not-required.html.md | 20 ++
.../azure/arm/_Config-not-required.html.md | 185 +++++++++++++
.../azure/arm/_Config-required.html.md | 22 ++
.../arm/_PlanInformation-not-required.html.md | 6 +
.../_SharedImageGallery-not-required.html.md | 12 +
.../cloudstack/_Config-not-required.html.md | 97 +++++++
.../cloudstack/_Config-required.html.md | 33 +++
.../digitalocean/_Config-not-required.html.md | 38 +++
.../digitalocean/_Config-required.html.md | 21 ++
.../_AwsAccessConfig-not-required.html.md | 19 ++
.../docker/_Config-not-required.html.md | 62 +++++
.../builder/docker/_Config-required.html.md | 15 ++
.../_Config-not-required.html.md | 119 +++++++++
.../googlecompute/_Config-required.html.md | 17 ++
.../hyperone/_Config-not-required.html.md | 61 +++++
.../builder/hyperone/_Config-required.html.md | 16 ++
.../common/_OutputConfig-not-required.html.md | 10 +
.../_ShutdownConfig-not-required.html.md | 16 ++
.../hyperv/iso/_Config-not-required.html.md | 128 +++++++++
.../hyperv/vmcx/_Config-not-required.html.md | 117 +++++++++
.../builder/lxc/_Config-not-required.html.md | 41 +++
.../builder/lxc/_Config-required.html.md | 9 +
.../builder/lxd/_Config-not-required.html.md | 22 ++
.../builder/lxd/_Config-required.html.md | 6 +
.../ncloud/_Config-not-required.html.md | 32 +++
.../builder/ncloud/_Config-required.html.md | 7 +
.../_AccessConfig-not-required.html.md | 57 ++++
.../openstack/_AccessConfig-required.html.md | 17 ++
.../_ImageConfig-not-required.html.md | 18 ++
.../openstack/_ImageConfig-required.html.md | 4 +
.../_ImageFilter-not-required.html.md | 11 +
.../_ImageFilterOptions-not-required.html.md | 7 +
.../openstack/_RunConfig-not-required.html.md | 85 ++++++
.../openstack/_RunConfig-required.html.md | 19 ++
.../common/_HWConfig-not-required.html.md | 14 +
.../common/_OutputConfig-not-required.html.md | 9 +
.../common/_PrlctlConfig-not-required.html.md | 13 +
.../_PrlctlPostConfig-not-required.html.md | 6 +
.../_PrlctlVersionConfig-not-required.html.md | 8 +
.../_ShutdownConfig-not-required.html.md | 11 +
.../common/_ToolsConfig-not-required.html.md | 17 ++
.../common/_ToolsConfig-required.html.md | 7 +
.../iso/_Config-not-required.html.md | 40 +++
.../pvm/_Config-not-required.html.md | 16 ++
.../parallels/pvm/_Config-required.html.md | 5 +
.../builder/qemu/_Config-not-required.html.md | 140 ++++++++++
.../scaleway/_Config-not-required.html.md | 17 ++
.../builder/scaleway/_Config-required.html.md | 29 +++
...centCloudAccessConfig-not-required.html.md | 4 +
..._TencentCloudAccessConfig-required.html.md | 16 ++
...ncentCloudImageConfig-not-required.html.md | 21 ++
.../_TencentCloudImageConfig-required.html.md | 6 +
...TencentCloudRunConfig-not-required.html.md | 44 ++++
.../_TencentCloudRunConfig-required.html.md | 9 +
.../triton/_AccessConfig-not-required.html.md | 20 ++
.../triton/_AccessConfig-required.html.md | 10 +
.../_MachineImageFilter-not-required.html.md | 3 +
.../_SourceMachineConfig-not-required.html.md | 39 +++
.../_SourceMachineConfig-required.html.md | 19 ++
.../_TargetImageConfig-not-required.html.md | 17 ++
.../_TargetImageConfig-required.html.md | 12 +
.../vagrant/_Config-not-required.html.md | 69 +++++
.../builder/vagrant/_Config-required.html.md | 17 ++
.../common/_ExportConfig-not-required.html.md | 5 +
.../common/_ExportOpts-not-required.html.md | 8 +
..._GuestAdditionsConfig-not-required.html.md | 11 +
.../common/_HWConfig-not-required.html.md | 15 ++
.../common/_OutputConfig-not-required.html.md | 9 +
.../common/_RunConfig-not-required.html.md | 17 ++
.../common/_SSHConfig-not-required.html.md | 16 ++
.../_ShutdownConfig-not-required.html.md | 20 ++
.../_VBoxBundleConfig-not-required.html.md | 7 +
.../_VBoxManageConfig-not-required.html.md | 13 +
..._VBoxManagePostConfig-not-required.html.md | 6 +
.../_VBoxVersionConfig-not-required.html.md | 10 +
.../iso/_Config-not-required.html.md | 77 ++++++
.../ovf/_Config-not-required.html.md | 63 +++++
.../virtualbox/ovf/_Config-required.html.md | 12 +
.../common/_DriverConfig-not-required.html.md | 38 +++
.../common/_ExportConfig-not-required.html.md | 41 +++
.../common/_HWConfig-not-required.html.md | 38 +++
.../common/_OutputConfig-not-required.html.md | 9 +
.../common/_RunConfig-not-required.html.md | 25 ++
.../_ShutdownConfig-not-required.html.md | 11 +
.../common/_ToolsConfig-not-required.html.md | 14 +
.../common/_VMXConfig-not-required.html.md | 23 ++
.../vmware/iso/_Config-not-required.html.md | 65 +++++
.../vmware/vmx/_Config-not-required.html.md | 15 ++
.../vmware/vmx/_Config-required.html.md | 5 +
.../yandex/_Config-not-required.html.md | 67 +++++
.../builder/yandex/_Config-required.html.md | 12 +
...lding_on_remote_vsphere_hypervisor.html.md | 28 +-
.../communicator/_Config-not-required.html.md | 36 +++
.../communicator/_SSH-not-required.html.md | 98 +++++++
.../communicator/_WinRM-not-required.html.md | 29 +++
127 files changed, 4050 insertions(+), 13 deletions(-)
create mode 100644 cmd/struct-markdown/main.go
create mode 100644 cmd/struct-markdown/template.go
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevice-not-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevices-not-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-not-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_RunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/alicloud/ecs/_RunConfig-required.html.md
create mode 100644 website/source/partials/builder/amazon/chroot/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/chroot/_Config-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AMIBlockDevices-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AMIConfig-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AMIConfig-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AccessConfig-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AmiFilterOptions-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_BlockDevice-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_LaunchBlockDevices-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_RunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_RunConfig-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_SubnetFilterOptions-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/common/_VaultAWSEngineOptions-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebs/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebssurrogate/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebssurrogate/_Config-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebssurrogate/_RootBlockDevice-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebsvolume/_BlockDevice-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/ebsvolume/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/instance/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/amazon/instance/_Config-required.html.md
create mode 100644 website/source/partials/builder/azure/arm/_ClientConfig-not-required.html.md
create mode 100644 website/source/partials/builder/azure/arm/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/azure/arm/_Config-required.html.md
create mode 100644 website/source/partials/builder/azure/arm/_PlanInformation-not-required.html.md
create mode 100644 website/source/partials/builder/azure/arm/_SharedImageGallery-not-required.html.md
create mode 100644 website/source/partials/builder/cloudstack/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/cloudstack/_Config-required.html.md
create mode 100644 website/source/partials/builder/digitalocean/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/digitalocean/_Config-required.html.md
create mode 100644 website/source/partials/builder/docker/_AwsAccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/docker/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/docker/_Config-required.html.md
create mode 100644 website/source/partials/builder/googlecompute/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/googlecompute/_Config-required.html.md
create mode 100644 website/source/partials/builder/hyperone/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/hyperone/_Config-required.html.md
create mode 100644 website/source/partials/builder/hyperv/common/_OutputConfig-not-required.html.md
create mode 100644 website/source/partials/builder/hyperv/common/_ShutdownConfig-not-required.html.md
create mode 100644 website/source/partials/builder/hyperv/iso/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/hyperv/vmcx/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/lxc/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/lxc/_Config-required.html.md
create mode 100644 website/source/partials/builder/lxd/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/lxd/_Config-required.html.md
create mode 100644 website/source/partials/builder/ncloud/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/ncloud/_Config-required.html.md
create mode 100644 website/source/partials/builder/openstack/_AccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/openstack/_AccessConfig-required.html.md
create mode 100644 website/source/partials/builder/openstack/_ImageConfig-not-required.html.md
create mode 100644 website/source/partials/builder/openstack/_ImageConfig-required.html.md
create mode 100644 website/source/partials/builder/openstack/_ImageFilter-not-required.html.md
create mode 100644 website/source/partials/builder/openstack/_ImageFilterOptions-not-required.html.md
create mode 100644 website/source/partials/builder/openstack/_RunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/openstack/_RunConfig-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_HWConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_OutputConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlPostConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlVersionConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_ShutdownConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_ToolsConfig-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/common/_ToolsConfig-required.html.md
create mode 100644 website/source/partials/builder/parallels/iso/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/pvm/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/parallels/pvm/_Config-required.html.md
create mode 100644 website/source/partials/builder/qemu/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/scaleway/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/scaleway/_Config-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-not-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-required.html.md
create mode 100644 website/source/partials/builder/triton/_AccessConfig-not-required.html.md
create mode 100644 website/source/partials/builder/triton/_AccessConfig-required.html.md
create mode 100644 website/source/partials/builder/triton/_MachineImageFilter-not-required.html.md
create mode 100644 website/source/partials/builder/triton/_SourceMachineConfig-not-required.html.md
create mode 100644 website/source/partials/builder/triton/_SourceMachineConfig-required.html.md
create mode 100644 website/source/partials/builder/triton/_TargetImageConfig-not-required.html.md
create mode 100644 website/source/partials/builder/triton/_TargetImageConfig-required.html.md
create mode 100644 website/source/partials/builder/vagrant/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/vagrant/_Config-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_ExportConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_ExportOpts-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_GuestAdditionsConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_HWConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_OutputConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_RunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_SSHConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_ShutdownConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_VBoxBundleConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_VBoxManageConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_VBoxManagePostConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/common/_VBoxVersionConfig-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/iso/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/ovf/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/virtualbox/ovf/_Config-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_DriverConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_ExportConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_HWConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_OutputConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_RunConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_ShutdownConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_ToolsConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/common/_VMXConfig-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/iso/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/vmx/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/vmware/vmx/_Config-required.html.md
create mode 100644 website/source/partials/builder/yandex/_Config-not-required.html.md
create mode 100644 website/source/partials/builder/yandex/_Config-required.html.md
create mode 100644 website/source/partials/helper/communicator/_Config-not-required.html.md
create mode 100644 website/source/partials/helper/communicator/_SSH-not-required.html.md
create mode 100644 website/source/partials/helper/communicator/_WinRM-not-required.html.md
diff --git a/cmd/struct-markdown/main.go b/cmd/struct-markdown/main.go
new file mode 100644
index 000000000..e5a31ff5d
--- /dev/null
+++ b/cmd/struct-markdown/main.go
@@ -0,0 +1,138 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/fatih/camelcase"
+ "github.com/fatih/structtag"
+)
+
+func main() {
+ args := flag.Args()
+ if len(args) == 0 {
+ // Default: process the file
+ args = []string{os.Getenv("GOFILE")}
+ }
+ fname := args[0]
+
+ absFilePath, err := filepath.Abs(fname)
+ if err != nil {
+ panic(err)
+ }
+ paths := strings.SplitAfter(absFilePath, "packer"+string(os.PathSeparator))
+ packerDir := paths[0]
+ builderName, _ := filepath.Split(paths[1])
+ builderName = strings.Trim(builderName, string(os.PathSeparator))
+
+ b, err := ioutil.ReadFile(fname)
+ if err != nil {
+ fmt.Printf("ReadFile: %+v", err)
+ os.Exit(1)
+ }
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, fname, b, parser.ParseComments)
+ if err != nil {
+ fmt.Printf("ParseFile: %+v", err)
+ os.Exit(1)
+ }
+
+ for _, decl := range f.Decls {
+ typeDecl, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ typeSpec, ok := typeDecl.Specs[0].(*ast.TypeSpec)
+ if !ok {
+ continue
+ }
+ structDecl, ok := typeSpec.Type.(*ast.StructType)
+ if !ok {
+ continue
+ }
+
+ fields := structDecl.Fields.List
+ required := Struct{
+ SourcePath: paths[1],
+ Name: typeSpec.Name.Name,
+ Filename: "_" + typeSpec.Name.Name + "-required.html.md",
+ }
+ notRequired := Struct{
+ SourcePath: paths[1],
+ Name: typeSpec.Name.Name,
+ Filename: "_" + typeSpec.Name.Name + "-not-required.html.md",
+ }
+
+ for _, field := range fields {
+ if len(field.Names) == 0 || field.Tag == nil {
+ continue
+ }
+ tag := field.Tag.Value[1:]
+ tag = tag[:len(tag)-1]
+ tags, err := structtag.Parse(tag)
+ if err != nil {
+ fmt.Printf("structtag.Parse(%s): err: %v", field.Tag.Value, err)
+ os.Exit(1)
+ }
+
+ mstr, err := tags.Get("mapstructure")
+ if err != nil {
+ continue
+ }
+ name := mstr.Name
+
+ if name == "" {
+ continue
+ }
+
+ var docs string
+ if field.Doc != nil {
+ docs = field.Doc.Text()
+ } else {
+ docs = strings.Join(camelcase.Split(field.Names[0].Name), " ")
+ }
+
+ field := Field{
+ Name: name,
+ Type: fmt.Sprintf("%s", b[field.Type.Pos()-1:field.Type.End()-1]),
+ Docs: docs,
+ }
+ if req, err := tags.Get("required"); err == nil && req.Value() == "true" {
+ required.Fields = append(required.Fields, field)
+ } else {
+ notRequired.Fields = append(notRequired.Fields, field)
+ }
+ }
+
+ dir := filepath.Join(packerDir, "website", "source", "partials", builderName)
+ os.MkdirAll(dir, 0755)
+
+ for _, str := range []Struct{required, notRequired} {
+ if len(str.Fields) == 0 {
+ continue
+ }
+ outputPath := filepath.Join(dir, str.Filename)
+
+ outputFile, err := os.Create(outputPath)
+ if err != nil {
+ panic(err)
+ }
+ defer outputFile.Close()
+
+ err = structDocsTemplate.Execute(outputFile, str)
+ if err != nil {
+ fmt.Printf("%v", err)
+ os.Exit(1)
+ }
+ }
+ }
+
+}
diff --git a/cmd/struct-markdown/template.go b/cmd/struct-markdown/template.go
new file mode 100644
index 000000000..e4a3e1a87
--- /dev/null
+++ b/cmd/struct-markdown/template.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "strings"
+ "text/template"
+)
+
+type Field struct {
+ Name string
+ Type string
+ Docs string
+}
+
+type Struct struct {
+ SourcePath string
+ Name string
+ Filename string
+ Fields []Field
+}
+
+var structDocsTemplate = template.Must(template.New("structDocsTemplate").
+ Funcs(template.FuncMap{
+ "indent": indent,
+ }).
+ Parse(`
+{{range .Fields}}
+- ` + "`" + `{{ .Name}}` + "`" + ` ({{ .Type }}) - {{ .Docs | indent 4 }}
+{{- end -}}`))
+
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return strings.Replace(v, "\n", "\n"+pad, -1)
+}
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-not-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-not-required.html.md
new file mode 100644
index 000000000..7de4365d3
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-not-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `skip_region_validation` (bool) - The region validation can be skipped
+ if this value is true, the default value is false.
+
+- `security_token` (string) - STS access token, can be set through template
+ or by exporting as environment variable such as
+ export SecurityToken=value.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-required.html.md
new file mode 100644
index 000000000..9e9efea26
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig-required.html.md
@@ -0,0 +1,13 @@
+
+
+- `access_key` (string) - This is the Alicloud access key. It must be
+ provided, but it can also be sourced from the ALICLOUD_ACCESS_KEY
+ environment variable.
+
+- `secret_key` (string) - This is the Alicloud secret key. It must be
+ provided, but it can also be sourced from the ALICLOUD_SECRET_KEY
+ environment variable.
+
+- `region` (string) - This is the Alicloud region. It must be provided, but
+ it can also be sourced from the ALICLOUD_REGION environment variable.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevice-not-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevice-not-required.html.md
new file mode 100644
index 000000000..eebbf129d
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevice-not-required.html.md
@@ -0,0 +1,39 @@
+
+
+- `disk_name` (string) - The value of disk name is blank by default. [2,
+ 128] English or Chinese characters, must begin with an
+ uppercase/lowercase letter or Chinese character. Can contain numbers,
+ ., _ and -. The disk name will appear on the console. It cannot
+ begin with http:// or https://.
+
+- `disk_category` (string) - Category of the system disk. Optional values
+ are:
+ - cloud - general cloud disk
+ - cloud_efficiency - efficiency cloud disk
+ - cloud_ssd - cloud SSD
+
+- `disk_size` (int) - Size of the system disk, measured in GiB. Value
+ range: [20, 500]. The specified value must be equal to or greater
+ than max{20, ImageSize}. Default value: max{40, ImageSize}.
+
+- `disk_snapshot_id` (string) - Snapshots are used to create the data
+ disk. After this parameter is specified, Size is ignored. The actual
+ size of the created disk is the size of the specified snapshot.
+
+- `disk_description` (string) - The value of disk description is blank by
+ default. [2, 256] characters. The disk description will appear on the
+ console. It cannot begin with http:// or https://.
+
+- `disk_delete_with_instance` (bool) - Whether or not the disk is
+ released along with the instance:
+
+- `disk_device` (string) - Device information of the related instance:
+ such as /dev/xvdb It is null unless the Status is In_use.
+
+- `disk_encrypted` (*bool) - Whether or not to encrypt the data disk.
+ If this option is set to true, the data disk will be encrypted and corresponding snapshot in the target image will also be encrypted. By
+ default, if this is an extra data disk, Packer will not encrypt the
+ data disk. Otherwise, Packer will keep the encryption setting to what
+ it was in the source image. Please refer to Introduction of ECS disk encryption
+ for more details.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevices-not-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevices-not-required.html.md
new file mode 100644
index 000000000..812ef7027
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudDiskDevices-not-required.html.md
@@ -0,0 +1,8 @@
+
+
+- `system_disk_mapping` (AlicloudDiskDevice) - Image disk mapping for system
+ disk.
+
+- `image_disk_mappings` ([]AlicloudDiskDevice) - Add one or more data
+ disks to the image.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-not-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-not-required.html.md
new file mode 100644
index 000000000..b7cc24f02
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-not-required.html.md
@@ -0,0 +1,53 @@
+
+
+- `image_version` (string) - The version number of the image, with a length
+ limit of 1 to 40 English characters.
+
+- `image_description` (string) - The description of the image, with a length
+ limit of 0 to 256 characters. Leaving it blank means null, which is the
+ default value. It cannot begin with http:// or https://.
+
+- `image_share_account` ([]string) - The IDs of to-be-added Aliyun
+ accounts to which the image is shared. The number of accounts is 1 to 10.
+ If number of accounts is greater than 10, this parameter is ignored.
+
+- `image_unshare_account` ([]string) - Alicloud Image UN Share Accounts
+- `image_copy_regions` ([]string) - Copy to the destination regionIds.
+
+- `image_copy_names` ([]string) - The name of the destination image,
+ [2, 128] English or Chinese characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character, and may contain numbers,
+ _ or -. It cannot begin with http:// or https://.
+
+- `image_encrypted` (*bool) - Whether or not to encrypt the target images, including those copied if image_copy_regions is specified. If this option
+ is set to true, a temporary image will be created from the provisioned
+ instance in the main region and an encrypted copy will be generated in the
+ same region. By default, Packer will keep the encryption setting to what
+ it was in the source image.
+
+- `image_force_delete` (bool) - If this value is true, when the target
+ image names including those copied are duplicated with existing images, it
+ will delete the existing images and then create the target images,
+ otherwise, the creation will fail. The default value is false. Check
+ image_name and image_copy_names options for names of target images. If
+ -force option is
+ provided in build command, this option can be omitted and taken as true.
+
+- `image_force_delete_snapshots` (bool) - If this value is true, when
+ deleting the duplicated existing images, the source snapshots of those images
+ will be deleted as well. If
+ -force option is
+ provided in build command, this option can be omitted and taken as true.
+
+- `image_force_delete_instances` (bool) - Alicloud Image Force Delete Instances
+- `image_ignore_data_disks` (bool) - If this value is true, the image
+ created will not include any snapshot of data disks. This option would be
+ useful for any circumstance that default data disks with instance types are
+ not concerned. The default value is false.
+
+- `skip_region_validation` (bool) - The region validation can be skipped
+ if this value is true, the default value is false.
+
+- `tags` (map[string]string) - Tags applied to the destination
+ image and relevant snapshots.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-required.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-required.html.md
new file mode 100644
index 000000000..f961c43e0
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudImageConfig-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `image_name` (string) - The name of the user-defined image, [2, 128]
+ English or Chinese characters. It must begin with an uppercase/lowercase
+ letter or a Chinese character, and may contain numbers, _ or -. It
+ cannot begin with http:// or https://.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_RunConfig-not-required.html.md b/website/source/partials/builder/alicloud/ecs/_RunConfig-not-required.html.md
new file mode 100644
index 000000000..35e38f767
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_RunConfig-not-required.html.md
@@ -0,0 +1,78 @@
+
+
+- `associate_public_ip_address` (bool) - Associate Public Ip Address
+- `zone_id` (string) - ID of the zone to which the disk belongs.
+
+- `io_optimized` (bool) - Whether an ECS instance is I/O optimized or not.
+ The default value is false.
+
+- `description` (string) - Description
+- `force_stop_instance` (bool) - Whether to force shutdown upon device
+ restart. The default value is false.
+
+- `disable_stop_instance` (bool) - If this option is set to true, Packer
+ will not stop the instance for you, and you need to make sure the instance
+ will be stopped in the final provisioner command. Otherwise, Packer will
+ timeout while waiting for the instance to be stopped. This option is provided
+ for some specific scenarios that you want to stop the instance by yourself.
+ E.g., Sysprep a windows which may shutdown the instance within its command.
+ The default value is false.
+
+- `security_group_id` (string) - ID of the security group to which a newly
+ created instance belongs. Mutual access is allowed between instances in one
+ security group. If not specified, the newly created instance will be added
+ to the default security group. If the default group doesn’t exist, or the
+ number of instances in it has reached the maximum limit, a new security
+ group will be created automatically.
+
+- `security_group_name` (string) - The security group name. The default value
+ is blank. [2, 128] English or Chinese characters, must begin with an
+ uppercase/lowercase letter or Chinese character. Can contain numbers, .,
+ _ or -. It cannot begin with http:// or https://.
+
+- `user_data` (string) - User data to apply when launching the instance. Note
+ that you need to be careful about escaping characters due to the templates
+ being JSON. It is often more convenient to use user_data_file, instead.
+ Packer will not automatically wait for a user script to finish before
+ shutting down the instance; this must be handled in a provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+ data when launching the instance.
+
+- `vpc_id` (string) - VPC ID allocated by the system.
+
+- `vpc_name` (string) - The VPC name. The default value is blank. [2, 128]
+ English or Chinese characters, must begin with an uppercase/lowercase
+ letter or Chinese character. Can contain numbers, _ and -. The disk
+ description will appear on the console. Cannot begin with http:// or
+ https://.
+
+- `vpc_cidr_block` (string) - Value options: 192.168.0.0/16 and
+ 172.16.0.0/16. When not specified, the default value is 172.16.0.0/16.
+
+- `vswitch_id` (string) - The ID of the VSwitch to be used.
+
+- `vswitch_id` (string) - The ID of the VSwitch to be used.
+
+- `instance_name` (string) - Display name of the instance, which is a string
+ of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals,
+ ., _, or -. The instance name is displayed on the Alibaba Cloud
+ console. If this parameter is not specified, the default value is
+ InstanceId of the instance. It cannot begin with http:// or https://.
+
+- `internet_charge_type` (string) - Internet charge type, which can be
+ PayByTraffic or PayByBandwidth. Optional values:
+
+- `internet_max_bandwidth_out` (int) - Maximum outgoing bandwidth to the
+ public network, measured in Mbps (Mega bits per second).
+
+- `wait_snapshot_ready_timeout` (int) - Timeout of creating snapshot(s).
+ The default timeout is 3600 seconds if this option is not set or is set
+ to 0. For those disks containing lots of data, it may require a higher
+ timeout value.
+
+- `ssh_private_ip` (bool) - If this value is true, packer will connect to
+ the ECS created through private ip instead of allocating a public ip or an
+ EIP. The default value is false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/alicloud/ecs/_RunConfig-required.html.md b/website/source/partials/builder/alicloud/ecs/_RunConfig-required.html.md
new file mode 100644
index 000000000..c617940a9
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_RunConfig-required.html.md
@@ -0,0 +1,13 @@
+
+
+- `instance_type` (string) - Type of the instance. For values, see Instance
+ Type
+ Table.
+ You can also obtain the latest instance type table by invoking the
+ Querying Instance Type
+ Table
+ interface.
+
+- `source_image` (string) - This is the base image id from which you want to
+ create your customized images.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/chroot/_Config-not-required.html.md b/website/source/partials/builder/amazon/chroot/_Config-not-required.html.md
new file mode 100644
index 000000000..e21aaf40d
--- /dev/null
+++ b/website/source/partials/builder/amazon/chroot/_Config-not-required.html.md
@@ -0,0 +1,130 @@
+
+
+- `chroot_mounts` ([][]string) - This is a list of devices to
+ mount into the chroot environment. This configuration parameter requires
+ some additional documentation which is in the Chroot
+ Mounts section. Please read that section for more
+ information on how to use this.
+
+- `command_wrapper` (string) - How to run shell commands. This defaults to
+ {{.Command}}. This may be useful to set if you want to set environmental
+ variables or perhaps run it with sudo or so on. This is a configuration
+ template where the .Command variable is replaced with the command to be
+ run. Defaults to {{.Command}}.
+
+- `copy_files` ([]string) - Paths to files on the running EC2
+ instance that will be copied into the chroot environment prior to
+ provisioning. Defaults to /etc/resolv.conf so that DNS lookups work. Pass
+ an empty list to skip copying /etc/resolv.conf. You may need to do this
+ if you're building an image that uses systemd.
+
+- `device_path` (string) - The path to the device where the root volume of
+ the source AMI will be attached. This defaults to "" (empty string), which
+ forces Packer to find an open device automatically.
+
+- `nvme_device_path` (string) - When we call the mount command (by default
+ mount -o device dir), the string provided in nvme_mount_path will
+ replace device in that command. When this option is not set, device in
+ that command will be something like /dev/sdf1, mirroring the attached
+ device name. This assumption works for most instances but will fail with c5
+ and m5 instances. In order to use the chroot builder with c5 and m5
+ instances, you must manually set nvme_device_path and device_path.
+
+- `from_scratch` (bool) - Build a new volume instead of starting from an
+ existing AMI root volume snapshot. Default false. If true, source_ami
+ is no longer used and the following options become required:
+ ami_virtualization_type, pre_mount_commands and root_volume_size. The
+ below options are also required in this mode only:
+
+- `mount_options` ([]string) - Options to supply the mount command
+ when mounting devices. Each option will be prefixed with -o and supplied
+ to the mount command run by Packer. Because this command is run in a
+ shell, user discretion is advised. See this manual page for the mount
+ command for valid file
+ system specific options.
+
+- `mount_partition` (string) - The partition number containing the /
+ partition. By default this is the first partition of the volume, (for
+ example, xvda1) but you can designate the entire block device by setting
+ "mount_partition": "0" in your config, which will mount xvda instead.
+
+- `mount_path` (string) - The path where the volume will be mounted. This is
+ where the chroot environment will be. This defaults to
+ /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration
+ template where the .Device variable is replaced with the name of the
+ device where the volume is attached.
+
+- `post_mount_commands` ([]string) - As pre_mount_commands, but the
+ commands are executed after mounting the root device and before the extra
+ mount and copy steps. The device and mount path are provided by
+ {{.Device}} and {{.MountPath}}.
+
+- `pre_mount_commands` ([]string) - A series of commands to execute
+ after attaching the root volume and before mounting the chroot. This is not
+ required unless using from_scratch. If so, this should include any
+ partitioning and filesystem creation commands. The path to the device is
+ provided by {{.Device}}.
+
+- `root_device_name` (string) - The root device name. For example, xvda.
+
+- `root_volume_size` (int64) - The size of the root volume in GB for the
+ chroot environment and the resulting AMI. Default size is the snapshot size
+ of the source_ami unless from_scratch is true, in which case this
+ field must be defined.
+
+- `root_volume_type` (string) - The type of EBS volume for the chroot
+ environment and resulting AMI. The default value is the type of the
+ source_ami, unless from_scratch is true, in which case the default
+ value is gp2. You can only specify io1 if building based on top of a
+ source_ami which is also io1.
+
+- `source_ami_filter` (awscommon.AmiFilterOptions) - Filters used to populate the source_ami
+ field. Example:
+
+ ``` json
+ {
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
+ }
+ ```
+
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
+
+ - `filters` (map of strings) - filters used to select a `source_ami`.
+ NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ described in the docs for
+ [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ is valid.
+
+ - `owners` (array of strings) - Filters the images by their owner. You
+ may specify one or more AWS account IDs, "self" (which will use the
+ account whose credentials you are using to run Packer), or an AWS owner
+ alias: for example, "amazon", "aws-marketplace", or "microsoft". This
+ option is required for security reasons.
+
+ - `most_recent` (boolean) - Selects the newest created image when true.
+ This is most useful for selecting a daily distro build.
+
+ You may set this in place of `source_ami` or in conjunction with it. If you
+ set this in conjunction with `source_ami`, the `source_ami` will be added
+ to the filter. The provided `source_ami` must meet all of the filtering
+ criteria provided in `source_ami_filter`; this pins the AMI returned by the
+ filter, but will cause Packer to fail if the `source_ami` does not exist.
+
+- `root_volume_tags` (awscommon.TagMap) - Tags to apply to the
+ volumes that are *launched*. This is a [template
+ engine](/docs/templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
+- `ami_architecture` (string) - what architecture to use when registering the
+ final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/chroot/_Config-required.html.md b/website/source/partials/builder/amazon/chroot/_Config-required.html.md
new file mode 100644
index 000000000..61be88601
--- /dev/null
+++ b/website/source/partials/builder/amazon/chroot/_Config-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `source_ami` (string) - The source AMI whose root volume will be copied and
+ provisioned on the currently running instance. This must be an EBS-backed
+ AMI with a root volume snapshot that you have access to. Note: this is not
+ used when from_scratch is set to true.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AMIBlockDevices-not-required.html.md b/website/source/partials/builder/amazon/common/_AMIBlockDevices-not-required.html.md
new file mode 100644
index 000000000..831deb552
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AMIBlockDevices-not-required.html.md
@@ -0,0 +1,52 @@
+
+
+- `ami_block_device_mappings` ([]BlockDevice) - Add one or more [block device
+ mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
+ to the AMI. These will be attached when booting a new instance from your
+ AMI. To add a block device during the Packer build see
+ `launch_block_device_mappings` below. Your options here may vary
+ depending on the type of VM you use. The block device mappings allow for
+ the following configuration:
+ - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
+ deleted on instance termination. Default `false`. **NOTE**: If this
+ value is not explicitly set to `true` and volumes are not cleaned up by
+ an alternative method, additional volumes will accumulate after every
+ build.
+
+ - `device_name` (string) - The device name exposed to the instance (for
+ example, `/dev/sdh` or `xvdh`). Required for every device in the block
+ device mapping.
+
+ - `encrypted` (boolean) - Indicates whether or not to encrypt the volume.
+ By default, Packer will keep the encryption setting to what it was in
+ the source image. Setting `false` will result in an unencrypted device,
+ and `true` will result in an encrypted one.
+
+ - `iops` (number) - The number of I/O operations per second (IOPS) that
+ the volume supports. See the documentation on
+ [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
+ for more information
+
+ - `kms_key_id` (string) - The ARN for the KMS encryption key. When
+ specifying `kms_key_id`, `encrypted` needs to be set to `true`. For
+ valid formats see *KmsKeyId* in the [AWS API docs -
+ CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+
+ - `no_device` (boolean) - Suppresses the specified device included in the
+ block device mapping of the AMI.
+
+ - `snapshot_id` (string) - The ID of the snapshot.
+
+ - `virtual_name` (string) - The virtual device name. See the
+ documentation on [Block Device
+ Mapping](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
+ for more information.
+
+ - `volume_size` (number) - The size of the volume, in GiB. Required if
+ not specifying a `snapshot_id`.
+
+ - `volume_type` (string) - The volume type. `gp2` for General Purpose
+ (SSD) volumes, `io1` for Provisioned IOPS (SSD) volumes, `st1` for
+ Throughput Optimized HDD, `sc1` for Cold HDD, and `standard` for
+ Magnetic volumes.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AMIConfig-not-required.html.md b/website/source/partials/builder/amazon/common/_AMIConfig-not-required.html.md
new file mode 100644
index 000000000..8c1d75f7e
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AMIConfig-not-required.html.md
@@ -0,0 +1,97 @@
+
+
+- `ami_description` (string) - The description to set for the resulting
+ AMI(s). By default this description is empty. This is a template
+ engine, see Build template
+ data for more information.
+
+- `ami_virtualization_type` (string) - The type of virtualization for the AMI
+ you are building. This option is required to register HVM images. Can be
+ paravirtual (default) or hvm. See the [AWS virtualization types
+ docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/virtualization_types.html).
+
+- `ami_users` ([]string) - A list of account IDs that have access to
+ launch the resulting AMI(s). By default no additional users other than the
+ user creating the AMI has permissions to launch it.
+
+- `ami_groups` ([]string) - A list of groups that have access to
+ launch the resulting AMI(s). By default no groups have permission to launch
+ the AMI. all will make the AMI publicly accessible.
+
+- `ami_product_codes` ([]string) - A list of product codes to
+ associate with the AMI. By default no product codes are associated with the
+ AMI.
+
+- `ami_regions` ([]string) - A list of regions to copy the AMI to.
+ Tags and attributes are copied along with the AMI. AMI copying takes time
+ depending on the size of the AMI, but will generally take many minutes.
+
+- `skip_region_validation` (bool) - Set to true if you want to skip
+ validation of the ami_regions configuration option. Default false.
+
+- `tags` (TagMap) - Tags applied to the AMI. This is a
+ [template engine](/docs/templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
+- `ena_support` (*bool) - Enable enhanced networking (ENA but not
+ SriovNetSupport) on HVM-compatible AMIs. If set, add
+ ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
+ disable enhanced networking in the final AMI as opposed to passing the
+ setting through unchanged from the source. Note: you must make sure
+ enhanced networking is enabled on your instance. [Amazon's
+ documentation on enabling enhanced
+ networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+
+- `sriov_support` (bool) - Enable enhanced networking (SriovNetSupport but not ENA) on
+ HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
+ AWS IAM policy. Note: you must make sure enhanced networking is enabled
+ on your instance. See [Amazon's documentation on enabling enhanced
+ networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+ Default `false`.
+
+- `force_deregister` (bool) - Force Packer to first deregister an existing
+ AMI if one with the same name already exists. Default false.
+
+- `force_delete_snapshot` (bool) - Force Packer to delete snapshots
+ associated with AMIs, which have been deregistered by force_deregister.
+ Default false.
+
+- `encrypt_boot` (*bool) - Whether or not to encrypt the resulting AMI when
+ copying a provisioned instance to an AMI. By default, Packer will keep the
+ encryption setting to what it was in the source image. Setting false will
+ result in an unencrypted image, and true will result in an encrypted one.
+
+- `kms_key_id` (string) - ID, alias or ARN of the KMS key to use for boot volume encryption. This
+ only applies to the main `region`, other regions where the AMI will be
+ copied will be encrypted by the default EBS KMS key. For valid formats
+ see *KmsKeyId* in the [AWS API docs -
+ CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+ This field is validated by Packer, when using an alias, you will have to
+ prefix `kms_key_id` with `alias/`.
+
+- `region_kms_key_ids` (map[string]string) - regions to copy the ami to, along with the custom kms key id (alias or
+ arn) to use for encryption for that region. Keys must match the regions
+ provided in `ami_regions`. If you just want to encrypt using a default
+ ID, you can stick with `kms_key_id` and `ami_regions`. If you want a
+ region to be encrypted with that region's default key ID, you can use an
+ empty string `""` instead of a key id in this map. (e.g. `"us-east-1":
+ ""`) However, you cannot use default key IDs if you are using this in
+ conjunction with `snapshot_users` -- in that situation you must use
+ custom keys. For valid formats see *KmsKeyId* in the [AWS API docs -
+ CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+
+- `snapshot_tags` (TagMap) - Tags to apply to snapshot.
+ They will override AMI tags if already applied to snapshot. This is a
+ [template engine](../templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
+- `snapshot_users` ([]string) - A list of account IDs that have
+ access to create volumes from the snapshot(s). By default no additional
+ users other than the user creating the AMI has permissions to create
+ volumes from the backing snapshot(s).
+
+- `snapshot_groups` ([]string) - A list of groups that have access to
+ create volumes from the snapshot(s). By default no groups have permission
+ to create volumes from the snapshot(s). all will make the snapshot
+ publicly accessible.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AMIConfig-required.html.md b/website/source/partials/builder/amazon/common/_AMIConfig-required.html.md
new file mode 100644
index 000000000..f7d6bea02
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AMIConfig-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `ami_name` (string) - The name of the resulting AMI that will appear when
+ managing AMIs in the AWS console or via APIs. This must be unique. To help
+ make this unique, use a function like timestamp (see [template
+ engine](../templates/engine.html) for more info).
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md b/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md
new file mode 100644
index 000000000..280afe8a1
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AccessConfig-not-required.html.md
@@ -0,0 +1,65 @@
+
+
+- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
+ provider whose API is compatible with aws EC2. Specify another endpoint
+ like this https://ec2.custom.endpoint.com.
+
+- `decode_authorization_messages` (bool) - Enable automatic decoding of any encoded authorization (error) messages
+ using the `sts:DecodeAuthorizationMessage` API. Note: requires that the
+ effective user/role have permissions to `sts:DecodeAuthorizationMessage`
+ on resource `*`. Default `false`.
+
+- `insecure_skip_tls_verify` (bool) - This allows skipping TLS
+ verification of the AWS EC2 endpoint. The default is false.
+
+- `mfa_code` (string) - The MFA TOTP code. This should probably be a user variable since it
+ changes all the time.
+
+- `profile` (string) - The profile to use in the shared credentials file for
+ AWS. See Amazon's documentation on [specifying
+ profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
+ for more details.
+
+- `skip_region_validation` (bool) - Set to true if you want to skip
+ validation of the ami_regions configuration option. Default false.
+
+- `skip_metadata_api_check` (bool) - Skip Metadata Api Check
+- `token` (string) - The access token to use. This is different from the
+ access key and secret key. If you're not sure what this is, then you
+ probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ environmental variable.
+
+- `vault_aws_engine` (VaultAWSEngineOptions) - Get credentials from Hashicorp Vault's aws secrets engine. You must
+ already have created a role to use. For more information about
+ generating credentials via the Vault engine, see the [Vault
+ docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
+ If you set this flag, you must also set the below options:
+ - `name` (string) - Required. Specifies the name of the role to generate
+ credentials against. This is part of the request URL.
+ - `engine_name` (string) - The name of the aws secrets engine. In the
+ Vault docs, this is normally referred to as "aws", and Packer will
+ default to "aws" if `engine_name` is not set.
+ - `role_arn` (string) - The ARN of the role to assume if credential\_type
+ on the Vault role is assumed\_role. Must match one of the allowed role
+ ARNs in the Vault role. Optional if the Vault role only allows a single
+ AWS role ARN; required otherwise.
+ - `ttl` (string) - Specifies the TTL for the use of the STS token. This
+ is specified as a string with a duration suffix. Valid only when
+ credential\_type is assumed\_role or federation\_token. When not
+ specified, the default\_sts\_ttl set for the role will be used. If that
+ is also not set, then the default value of 3600s will be used. AWS
+ places limits on the maximum TTL allowed. See the AWS documentation on
+ the DurationSeconds parameter for AssumeRole (for assumed\_role
+ credential types) and GetFederationToken (for federation\_token
+ credential types) for more details.
+
+ ``` json
+ {
+ "vault_aws_engine": {
+ "name": "myrole",
+ "role_arn": "myarn",
+ "ttl": "3600s"
+ }
+ }
+ ```
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AccessConfig-required.html.md b/website/source/partials/builder/amazon/common/_AccessConfig-required.html.md
new file mode 100644
index 000000000..b898b65c9
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AccessConfig-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `access_key` (string) - The access key used to communicate with AWS. [Learn
+ how to set this](/docs/builders/amazon.html#specifying-amazon-credentials).
+ On EBS, this is not required if you are using `vault_aws_engine` for
+ authentication instead.
+
+- `region` (string) - The name of the region, such as `us-east-1`, in which
+ to launch the EC2 instance to create the AMI.
+ When chroot building, this value is guessed from environment.
+
+- `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set
+ this](amazon.html#specifying-amazon-credentials). This is not required
+ if you are using `vault_aws_engine` for authentication instead.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_AmiFilterOptions-not-required.html.md b/website/source/partials/builder/amazon/common/_AmiFilterOptions-not-required.html.md
new file mode 100644
index 000000000..4e1187cc9
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AmiFilterOptions-not-required.html.md
@@ -0,0 +1,3 @@
+
+
+- `most_recent` (bool) - Most Recent
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_BlockDevice-not-required.html.md b/website/source/partials/builder/amazon/common/_BlockDevice-not-required.html.md
new file mode 100644
index 000000000..8f85eda56
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_BlockDevice-not-required.html.md
@@ -0,0 +1,50 @@
+
+
+- `delete_on_termination` (bool) - Indicates whether the EBS volume is
+ deleted on instance termination. Default false. NOTE: If this
+ value is not explicitly set to true and volumes are not cleaned up by
+ an alternative method, additional volumes will accumulate after every
+ build.
+
+- `device_name` (string) - The device name exposed to the instance (for
+ example, /dev/sdh or xvdh). Required for every device in the block
+ device mapping.
+
+- `encrypted` (*bool) - Indicates whether or not to encrypt the volume.
+ By default, Packer will keep the encryption setting to what it was in
+ the source image. Setting false will result in an unencrypted device,
+ and true will result in an encrypted one.
+
+- `iops` (int64) - The number of I/O operations per second (IOPS) that
+ the volume supports. See the documentation on
+ IOPs
+ for more information
+
+- `no_device` (bool) - Suppresses the specified device included in the
+ block device mapping of the AMI.
+
+- `snapshot_id` (string) - The ID of the snapshot.
+
+- `virtual_name` (string) - The virtual device name. See the
+ documentation on Block Device
+ Mapping
+ for more information.
+
+- `volume_type` (string) - The volume type. gp2 for General Purpose
+ (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ Magnetic volumes.
+
+- `volume_size` (int64) - The size of the volume, in GiB. Required if
+ not specifying a snapshot_id.
+
+- `kms_key_id` (string) - ID, alias or ARN of the KMS key to use for boot
+ volume encryption. This only applies to the main region, other regions
+ where the AMI will be copied will be encrypted by the default EBS KMS key.
+ For valid formats see KmsKeyId in the [AWS API docs -
+ CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html)
+ This field is validated by Packer, when using an alias, you will have to
+ prefix kms_key_id with alias/.
+
+- `omit_from_artifact` (bool) - ebssurrogate only
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_LaunchBlockDevices-not-required.html.md b/website/source/partials/builder/amazon/common/_LaunchBlockDevices-not-required.html.md
new file mode 100644
index 000000000..d9efd82e8
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_LaunchBlockDevices-not-required.html.md
@@ -0,0 +1,19 @@
+
+
+- `launch_block_device_mappings` ([]BlockDevice) - Add one or more block devices before the Packer build starts. If you add
+ instance store volumes or EBS volumes in addition to the root device
+ volume, the created AMI will contain block device mapping information
+ for those volumes. Amazon creates snapshots of the source instance's
+ root volume and any other EBS volumes described here. When you launch an
+ instance from this new AMI, the instance automatically launches with
+ these additional volumes, and will restore them from snapshots taken
+ from the source instance.
+
+ In addition to the fields available in ami_block_device_mappings, you
+ may optionally use the following field:
+ - `omit_from_artifact` (boolean) - If true, this block device will not
+ be snapshotted and the created AMI will not contain block device mapping
+ information for this volume. If false, the block device will be mapped
+ into the final created AMI. Set this option to true if you need a block
+ device mounted in the surrogate AMI but not in the final created AMI.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_RunConfig-not-required.html.md b/website/source/partials/builder/amazon/common/_RunConfig-not-required.html.md
new file mode 100644
index 000000000..7fbb68c95
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_RunConfig-not-required.html.md
@@ -0,0 +1,246 @@
+
+
+- `associate_public_ip_address` (bool) - If using a non-default VPC,
+ public IP addresses are not provided by default. If this is true, your
+ new instance will get a Public IP. default: false
+
+- `availability_zone` (string) - Destination availability zone to launch
+ instance in. Leave this empty to allow Amazon to auto-assign.
+
+- `block_duration_minutes` (int64) - Requires spot_price to be set. The
+ required duration for the Spot Instances (also known as Spot blocks). This
+ value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
+ specify an Availability Zone group or a launch group if you specify a
+ duration.
+
+- `disable_stop_instance` (bool) - Packer normally stops the build instance after all provisioners have
+ run. For Windows instances, it is sometimes desirable to [run
+ Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
+ which will stop the instance for you. If this is set to `true`, Packer
+ *will not* stop the instance but will assume that you will send the stop
+ signal yourself through your final provisioner. You can do this with a
+ [windows-shell
+ provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
+ Note that Packer will still wait for the instance to be stopped, and
+ failing to send the stop signal yourself, when you have set this flag to
+ `true`, will cause a timeout.
+ Example of a valid shutdown command:
+
+ ``` json
+ {
+ "type": "windows-shell",
+ "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
+ }
+ ```
+
+- `ebs_optimized` (bool) - Mark instance as [EBS
+ Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ Default `false`.
+
+- `enable_t2_unlimited` (bool) - Enabling T2 Unlimited allows the source instance to burst additional CPU
+ beyond its available [CPU
+ Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+ for as long as the demand exists. This is in contrast to the standard
+ configuration that only allows an instance to consume up to its
+ available CPU Credits. See the AWS documentation for [T2
+ Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+ and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
+ Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for
+ more information. By default this option is disabled and Packer will set
+ up a [T2
+ Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+ instance instead.
+
+ To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
+ Additionally, T2 Unlimited cannot be used in conjunction with Spot
+ Instances, e.g. when the `spot_price` option has been configured.
+ Attempting to do so will cause an error.
+
+ !> **Warning!** Additional costs may be incurred by enabling T2
+ Unlimited - even for instances that would usually qualify for the
+ [AWS Free Tier](https://aws.amazon.com/free/).
+
+- `iam_instance_profile` (string) - The name of an [IAM instance
+ profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
+ to launch the EC2 instance with.
+
+- `shutdown_behavior` (string) - Automatically terminate instances on
+ shutdown in case Packer exits ungracefully. Possible values are stop and
+ terminate. Defaults to stop.
+
+- `security_group_filter` (SecurityGroupFilterOptions) - Filters used to populate the `security_group_ids` field. Example:
+
+ ``` json
+ {
+ "security_group_filter": {
+ "filters": {
+ "tag:Class": "packer"
+ }
+ }
+ }
+ ```
+
+ This selects the SG's with tag `Class` with the value `packer`.
+
+ - `filters` (map of strings) - filters used to select a
+ `security_group_ids`. Any filter described in the docs for
+ [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+ is valid.
+
+    `security_group_ids` takes precedence over this.
+
+- `run_tags` (map[string]string) - Tags to apply to the instance that is *launched* to create the AMI.
+ These tags are *not* applied to the resulting AMI unless they're
+ duplicated in `tags`. This is a [template
+ engine](/docs/templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
+- `security_group_id` (string) - The ID (not the name) of the security
+ group to assign to the instance. By default this is not set and Packer will
+ automatically create a new temporary security group to allow SSH access.
+ Note that if this is specified, you must be sure the security group allows
+ access to the ssh_port given below.
+
+- `security_group_ids` ([]string) - A list of security groups as
+ described above. Note that if this is specified, you must omit the
+ security_group_id.
+
+- `source_ami_filter` (AmiFilterOptions) - Filters used to populate the `source_ami`
+ field. Example:
+
+ ``` json
+ {
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*",
+ "root-device-type": "ebs"
+ },
+ "owners": ["099720109477"],
+ "most_recent": true
+ }
+ }
+ ```
+
+ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ This will fail unless *exactly* one AMI is returned. In the above example,
+ `most_recent` will cause this to succeed by selecting the newest image.
+
+ - `filters` (map of strings) - filters used to select a `source_ami`.
+ NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ described in the docs for
+ [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ is valid.
+
+ - `owners` (array of strings) - Filters the images by their owner. You
+ may specify one or more AWS account IDs, "self" (which will use the
+ account whose credentials you are using to run Packer), or an AWS owner
+ alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
+ option is required for security reasons.
+
+ - `most_recent` (boolean) - Selects the newest created image when true.
+ This is most useful for selecting a daily distro build.
+
+ You may set this in place of `source_ami` or in conjunction with it. If you
+ set this in conjunction with `source_ami`, the `source_ami` will be added
+ to the filter. The provided `source_ami` must meet all of the filtering
+ criteria provided in `source_ami_filter`; this pins the AMI returned by the
+ filter, but will cause Packer to fail if the `source_ami` does not exist.
+
+- `spot_instance_types` ([]string) - a list of acceptable instance
+ types to run your build on. We will request a spot instance using the max
+ price of spot_price and the allocation strategy of "lowest price".
+ Your instance will be launched on an instance type of the lowest available
+ price that you have in your list. This is used in place of instance_type.
+ You may only set either spot_instance_types or instance_type, not both.
+ This feature exists to help prevent situations where a Packer build fails
+ because a particular availability zone does not have capacity for the
+ specific instance_type requested in instance_type.
+
+- `spot_price` (string) - The maximum hourly price to pay for a spot instance
+ to create the AMI. Spot instances are a type of instance that EC2 starts
+ when the current spot price is less than the maximum price you specify.
+ Spot price will be updated based on available spot instance capacity and
+ current spot instance requests. It may save you some costs. You can set
+ this to auto for Packer to automatically discover the best spot price or
+ to "0" to use an on demand instance (default).
+
+- `spot_price_auto_product` (string) - Required if spot_price is set to
+ auto. This tells Packer what sort of AMI you're launching to find the
+ best spot price. This must be one of: Linux/UNIX, SUSE Linux,
+ Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
+ Windows (Amazon VPC)
+
+- `spot_tags` (map[string]string) - Requires spot_price to be
+ set. This tells Packer to apply tags to the spot request that is issued.
+
+- `subnet_filter` (SubnetFilterOptions) - Filters used to populate the `subnet_id` field.
+ Example:
+
+ ``` json
+ {
+ "subnet_filter": {
+ "filters": {
+ "tag:Class": "build"
+ },
+ "most_free": true,
+ "random": false
+ }
+ }
+ ```
+
+ This selects the Subnet with tag `Class` with the value `build`, which has
+ the most free IP addresses. NOTE: This will fail unless *exactly* one
+ Subnet is returned. By using `most_free` or `random` one will be selected
+ from those matching the filter.
+
+ - `filters` (map of strings) - filters used to select a `subnet_id`.
+ NOTE: This will fail unless *exactly* one Subnet is returned. Any
+ filter described in the docs for
+ [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
+ is valid.
+
+      - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
+        will be used if multiple Subnets match the filter.
+
+      - `random` (boolean) - A random Subnet will be used if multiple Subnets
+        match the filter. `most_free` has precedence over this.
+
+      `subnet_id` takes precedence over this.
+
+- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
+ subnet-12345def, where Packer will launch the EC2 instance. This field is
+    required if you are using a non-default VPC.
+
+- `temporary_key_pair_name` (string) - The name of the temporary key pair to
+    generate. By default, Packer generates a name that looks like
+    `packer_<UUID>`, where `<UUID>` is a 36 character unique identifier.
+
+- `temporary_security_group_source_cidrs` ([]string) - A list of IPv4 CIDR blocks to be authorized access to the instance, when
+ packer is creating a temporary security group.
+
+ The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only
+ used when `security_group_id` or `security_group_ids` is not specified.
+
+- `user_data` (string) - User data to apply when launching the instance. Note
+ that you need to be careful about escaping characters due to the templates
+ being JSON. It is often more convenient to use user_data_file, instead.
+    Packer will not automatically wait for a user script to finish before
+    shutting down the instance; this must be handled in a provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+ data when launching the instance.
+
+- `vpc_filter` (VpcFilterOptions) - Filters used to populate the vpc_id field.
+    vpc_id takes precedence over this.
+ Example:
+
+- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
+ in order to create a temporary security group within the VPC. Requires
+ subnet_id to be set. If this field is left blank, Packer will try to get
+ the VPC ID from the subnet_id.
+
+- `windows_password_timeout` (time.Duration) - The timeout for waiting for a Windows
+ password for Windows instances. Defaults to 20 minutes. Example value:
+ 10m
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_RunConfig-required.html.md b/website/source/partials/builder/amazon/common/_RunConfig-required.html.md
new file mode 100644
index 000000000..af4443aeb
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_RunConfig-required.html.md
@@ -0,0 +1,10 @@
+
+
+- `instance_type` (string) - The EC2 instance type to use while building the
+ AMI, such as t2.small.
+
+- `source_ami` (string) - The source AMI whose root volume will be copied and
+ provisioned on the currently running instance. This must be an EBS-backed
+ AMI with a root volume snapshot that you have access to. Note: this is not
+ used when from_scratch is set to true.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_SubnetFilterOptions-not-required.html.md b/website/source/partials/builder/amazon/common/_SubnetFilterOptions-not-required.html.md
new file mode 100644
index 000000000..e40a7730d
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_SubnetFilterOptions-not-required.html.md
@@ -0,0 +1,4 @@
+
+
+- `most_free` (bool) - Most Free
+- `random` (bool) - Random
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/common/_VaultAWSEngineOptions-not-required.html.md b/website/source/partials/builder/amazon/common/_VaultAWSEngineOptions-not-required.html.md
new file mode 100644
index 000000000..c329c73ac
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_VaultAWSEngineOptions-not-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `name` (string) - Name
+- `role_arn` (string) - Role ARN
+- `ttl` (string) - Specifies the TTL for the use of the STS token. This
+ is specified as a string with a duration suffix. Valid only when
+ credential_type is assumed_role or federation_token. When not
+ specified, the default_sts_ttl set for the role will be used. If that
+ is also not set, then the default value of 3600s will be used. AWS
+ places limits on the maximum TTL allowed. See the AWS documentation on
+ the DurationSeconds parameter for AssumeRole (for assumed_role
+ credential types) and GetFederationToken (for federation_token
+ credential types) for more details.
+
+- `engine_name` (string) - Engine Name
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebs/_Config-not-required.html.md b/website/source/partials/builder/amazon/ebs/_Config-not-required.html.md
new file mode 100644
index 000000000..80f25ce3e
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebs/_Config-not-required.html.md
@@ -0,0 +1,8 @@
+
+
+- `run_volume_tags` (awscommon.TagMap) - Tags to apply to the volumes that are *launched* to create the AMI.
+ These tags are *not* applied to the resulting AMI unless they're
+ duplicated in `tags`. This is a [template
+ engine](/docs/templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebssurrogate/_Config-not-required.html.md b/website/source/partials/builder/amazon/ebssurrogate/_Config-not-required.html.md
new file mode 100644
index 000000000..db9846f2a
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebssurrogate/_Config-not-required.html.md
@@ -0,0 +1,11 @@
+
+
+- `run_volume_tags` (awscommon.TagMap) - Tags to apply to the volumes that are *launched* to create the AMI.
+ These tags are *not* applied to the resulting AMI unless they're
+ duplicated in `tags`. This is a [template
+ engine](/docs/templates/engine.html), see [Build template
+ data](#build-template-data) for more information.
+
+- `ami_architecture` (string) - what architecture to use when registering the
+ final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebssurrogate/_Config-required.html.md b/website/source/partials/builder/amazon/ebssurrogate/_Config-required.html.md
new file mode 100644
index 000000000..371cfbd33
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebssurrogate/_Config-required.html.md
@@ -0,0 +1,10 @@
+
+
+- `ami_root_device` (RootBlockDevice) - A block device mapping describing the root device of the AMI. This looks
+ like the mappings in `ami_block_device_mapping`, except with an
+ additional field:
+
+ - `source_device_name` (string) - The device name of the block device on
+ the source instance to be used as the root device for the AMI. This
+ must correspond to a block device in `launch_block_device_mapping`.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebssurrogate/_RootBlockDevice-not-required.html.md b/website/source/partials/builder/amazon/ebssurrogate/_RootBlockDevice-not-required.html.md
new file mode 100644
index 000000000..17b0d8c3f
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebssurrogate/_RootBlockDevice-not-required.html.md
@@ -0,0 +1,26 @@
+
+
+- `source_device_name` (string) - Source Device Name
+- `device_name` (string) - The device name exposed to the instance (for
+ example, /dev/sdh or xvdh). Required for every device in the block
+ device mapping.
+
+- `delete_on_termination` (bool) - Indicates whether the EBS volume is
+ deleted on instance termination. Default false. NOTE: If this
+ value is not explicitly set to true and volumes are not cleaned up by
+ an alternative method, additional volumes will accumulate after every
+ build.
+
+- `iops` (int64) - The number of I/O operations per second (IOPS) that
+ the volume supports. See the documentation on
+ IOPs
+ for more information
+
+- `volume_type` (string) - The volume type. gp2 for General Purpose
+ (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
+ Throughput Optimized HDD, sc1 for Cold HDD, and standard for
+ Magnetic volumes.
+
+- `volume_size` (int64) - The size of the volume, in GiB. Required if
+ not specifying a snapshot_id.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebsvolume/_BlockDevice-not-required.html.md b/website/source/partials/builder/amazon/ebsvolume/_BlockDevice-not-required.html.md
new file mode 100644
index 000000000..c27a24348
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebsvolume/_BlockDevice-not-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `tags` (awscommon.TagMap) - Tags applied to the AMI. This is a
+ template engine, see Build template
+ data for more information.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/ebsvolume/_Config-not-required.html.md b/website/source/partials/builder/amazon/ebsvolume/_Config-not-required.html.md
new file mode 100644
index 000000000..6ec436c83
--- /dev/null
+++ b/website/source/partials/builder/amazon/ebsvolume/_Config-not-required.html.md
@@ -0,0 +1,19 @@
+
+
+- `ebs_volumes` ([]BlockDevice) - Add the block device
+ mappings to the AMI. The block device mappings allow for keys:
+
+- `ena_support` (*bool) - Enable enhanced networking (ENA but not SriovNetSupport) on
+ HVM-compatible AMIs. If set, add ec2:ModifyInstanceAttribute to your AWS
+ IAM policy. If false, this will disable enhanced networking in the final
+ AMI as opposed to passing the setting through unchanged from the source.
+ Note: you must make sure enhanced networking is enabled on your
+ instance. See Amazon's documentation on enabling enhanced networking.
+
+- `sriov_support` (bool) - Enable enhanced networking (SriovNetSupport but not ENA) on
+ HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
+ AWS IAM policy. Note: you must make sure enhanced networking is enabled
+ on your instance. See [Amazon's documentation on enabling enhanced
+ networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+ Default `false`.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/instance/_Config-not-required.html.md b/website/source/partials/builder/amazon/instance/_Config-not-required.html.md
new file mode 100644
index 000000000..fd4a09cd4
--- /dev/null
+++ b/website/source/partials/builder/amazon/instance/_Config-not-required.html.md
@@ -0,0 +1,24 @@
+
+
+- `bundle_destination` (string) - The directory on the running instance where
+ the bundled AMI will be saved prior to uploading. By default this is
+ /tmp. This directory must exist and be writable.
+
+- `bundle_prefix` (string) - The prefix for files created from bundling the
+ root volume. By default this is image-{{timestamp}}. The timestamp
+ variable should be used to make sure this is unique, otherwise it can
+ collide with other created AMIs by Packer in your account.
+
+- `bundle_upload_command` (string) - The command to use to upload the bundled
+ volume. See the "custom bundle commands" section below for more
+ information.
+
+- `bundle_vol_command` (string) - The command to use to bundle the volume.
+ See the "custom bundle commands" section below for more information.
+
+- `x509_upload_path` (string) - The path on the remote machine where the X509
+ certificate will be uploaded. This path must already exist and be writable.
+ X509 certificates are uploaded after provisioning is run, so it is
+ perfectly okay to create this directory as part of the provisioning
+ process. Defaults to /tmp.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/amazon/instance/_Config-required.html.md b/website/source/partials/builder/amazon/instance/_Config-required.html.md
new file mode 100644
index 000000000..afcc402e7
--- /dev/null
+++ b/website/source/partials/builder/amazon/instance/_Config-required.html.md
@@ -0,0 +1,18 @@
+
+
+- `account_id` (string) - Your AWS account ID. This is required for bundling
+ the AMI. This is not the same as the access key. You can find your
+ account ID in the security credentials page of your AWS account.
+
+- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This
+ bucket will be created if it doesn't exist.
+
+- `x509_cert_path` (string) - The local path to a valid X509 certificate for
+ your AWS account. This is used for bundling the AMI. This X509 certificate
+ must be registered with your account from the security credentials page in
+ the AWS console.
+
+- `x509_key_path` (string) - The local path to the private key for the X509
+ certificate specified by x509_cert_path. This is used for bundling the
+ AMI.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/azure/arm/_ClientConfig-not-required.html.md b/website/source/partials/builder/azure/arm/_ClientConfig-not-required.html.md
new file mode 100644
index 000000000..f07c96675
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_ClientConfig-not-required.html.md
@@ -0,0 +1,20 @@
+
+
+- `cloud_environment_name` (string) - One of Public, China, Germany, or
+ USGovernment. Defaults to Public. Long forms such as
+ USGovernmentCloud and AzureUSGovernmentCloud are also supported.
+
+- `client_id` (string) - Client ID
+
+- `client_secret` (string) - Client secret/password
+
+- `client_cert_path` (string) - Certificate path for client auth
+
+- `client_jwt` (string) - JWT bearer token for client auth (RFC 7523, Sec. 2.2)
+
+- `object_id` (string) - Object ID
+- `tenant_id` (string) - The account identifier with which your client_id and
+ subscription_id are associated. If not specified, tenant_id will be
+ looked up using subscription_id.
+
+- `subscription_id` (string) - Subscription ID
\ No newline at end of file
diff --git a/website/source/partials/builder/azure/arm/_Config-not-required.html.md b/website/source/partials/builder/azure/arm/_Config-not-required.html.md
new file mode 100644
index 000000000..2f34f6237
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_Config-not-required.html.md
@@ -0,0 +1,185 @@
+
+
+- `capture_name_prefix` (string) - Capture
+
+- `capture_container_name` (string) - Capture Container Name
+- `shared_image_gallery` (SharedImageGallery) - Use a [Shared Gallery
+ image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
+ as the source for this build. *VHD targets are incompatible with this
+ build type* - the target must be a *Managed Image*.
+
+ "shared_image_gallery": {
+ "subscription": "00000000-0000-0000-0000-00000000000",
+ "resource_group": "ResourceGroup",
+ "gallery_name": "GalleryName",
+ "image_name": "ImageName",
+ "image_version": "1.0.0"
+ }
+ "managed_image_name": "TargetImageName",
+ "managed_image_resource_group_name": "TargetResourceGroup"
+
+- `image_version` (string) - Specify a specific version of an OS to boot from.
+ Defaults to `latest`. There may be a difference in versions available
+ across regions due to image synchronization latency. To ensure a consistent
+ version across regions set this value to one that is available in all
+ regions where you are deploying.
+
+ CLI example
+ `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
+
+- `image_url` (string) - Specify a custom VHD to use. If this value is set, do
+ not set image_publisher, image_offer, image_sku, or image_version.
+
+- `custom_managed_image_resource_group_name` (string) - Specify the source managed image's resource group to use. If this
+ value is set, do not set image\_publisher, image\_offer, image\_sku, or
+ image\_version. If this value is set, the value
+ `custom_managed_image_name` must also be set. See
+ [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
+ to learn more about managed images.
+
+- `custom_managed_image_name` (string) - Specify the source managed image's name to use. If this value is set, do
+ not set image\_publisher, image\_offer, image\_sku, or image\_version.
+ If this value is set, the value
+ `custom_managed_image_resource_group_name` must also be set. See
+ [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
+ to learn more about managed images.
+
+- `location` (string) - Location
+- `vm_size` (string) - Size of the VM used for building. This can be changed when you deploy a
+ VM from your VHD. See
+ [pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
+ information. Defaults to `Standard_A1`.
+
+ CLI example `az vm list-sizes --location westus`
+
+- `managed_image_resource_group_name` (string) - Managed Image Resource Group Name
+- `managed_image_name` (string) - Managed Image Name
+- `managed_image_storage_account_type` (string) - Specify the storage account
+ type for a managed image. Valid values are Standard_LRS and Premium_LRS.
+ The default is Standard_LRS.
+
+- `managed_image_os_disk_snapshot_name` (string) - If
+ managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
+ is created with the same name as this value before the VM is captured.
+
+- `managed_image_data_disk_snapshot_prefix` (string) - If
+ managed_image_data_disk_snapshot_prefix is set, snapshot of the data
+ disk(s) is created with the same prefix as this value before the VM is
+ captured.
+
+- `managed_image_zone_resilient` (bool) - Store the image in zone-resilient storage. You need to create it in a
+ region that supports [availability
+ zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
+
+- `azure_tags` (map[string]*string) - the user can define up to 15
+ tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
+ 256 characters. Tags are applied to every resource deployed by a Packer
+ build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
+
+- `resource_group_name` (string) - Resource Group Name
+- `storage_account` (string) - Storage Account
+- `temp_compute_name` (string) - temporary name assigned to the VM. If this
+ value is not set, a random value will be assigned. Knowing the resource
+ group and VM name allows one to execute commands to update the VM during a
+ Packer build, e.g. attach a resource disk to the VM.
+
+- `temp_resource_group_name` (string) - Temp Resource Group Name
+- `build_resource_group_name` (string) - Build Resource Group Name
+- `private_virtual_network_with_public_ip` (bool) - This value allows you to
+ set a virtual_network_name and obtain a public IP. If this value is not
+ set and virtual_network_name is defined Packer is only allowed to be
+ executed from a host on the same subnet / virtual network.
+
+- `virtual_network_name` (string) - Use a pre-existing virtual network for the
+ VM. This option enables private communication with the VM, no public IP
+ address is used or provisioned (unless you set
+ private_virtual_network_with_public_ip).
+
+- `virtual_network_subnet_name` (string) - If virtual_network_name is set,
+ this value may also be set. If virtual_network_name is set, and this
+ value is not set the builder attempts to determine the subnet to use with
+ the virtual network. If the subnet cannot be found, or it cannot be
+ disambiguated, this value should be set.
+
+- `virtual_network_resource_group_name` (string) - If virtual_network_name is
+ set, this value may also be set. If virtual_network_name is set, and
+ this value is not set the builder attempts to determine the resource group
+ containing the virtual network. If the resource group cannot be found, or
+ it cannot be disambiguated, this value should be set.
+
+- `custom_data_file` (string) - Specify a file containing custom data to inject into the cloud-init
+ process. The contents of the file are read and injected into the ARM
+ template. The custom data will be passed to cloud-init for processing at
+ the time of provisioning. See
+ [documentation](http://cloudinit.readthedocs.io/en/latest/topics/examples.html)
+ to learn more about custom data, and how it can be used to influence the
+ provisioning process.
+
+- `plan_info` (PlanInformation) - Used for creating images from Marketplace images. Please refer to
+ [Deploy an image with Marketplace
+ terms](https://aka.ms/azuremarketplaceapideployment) for more details.
+ Not all Marketplace images support programmatic deployment, and support
+ is controlled by the image publisher.
+
+ An example plan\_info object is defined below.
+
+ ``` json
+ {
+ "plan_info": {
+ "plan_name": "rabbitmq",
+ "plan_product": "rabbitmq",
+ "plan_publisher": "bitnami"
+ }
+ }
+ ```
+
+ `plan_name` (string) - The plan name, required. `plan_product` (string) -
+ The plan product, required. `plan_publisher` (string) - The plan publisher,
+ required. `plan_promotion_code` (string) - Some images accept a promotion
+ code, optional.
+
+ Images created from the Marketplace with `plan_info` **must** specify
+ `plan_info` whenever the image is deployed. The builder automatically adds
+ tags to the image to ensure this information is not lost. The following
+ tags are added.
+
+ 1. PlanName
+ 2. PlanProduct
+ 3. PlanPublisher
+ 4. PlanPromotionCode
+
+- `os_type` (string) - If either Linux or Windows is specified Packer will
+ automatically configure authentication credentials for the provisioned
+ machine. For Linux this configures an SSH authorized key. For Windows
+ this configures a WinRM certificate.
+
+- `os_disk_size_gb` (int32) - Specify the size of the OS disk in GB
+ (gigabytes). Values of zero or less than zero are ignored.
+
+- `disk_additional_size` ([]int32) - The size(s) of any additional hard disks for the VM in gigabytes. If
+ this is not specified then the VM will only contain an OS disk. The
+ number of additional disks and maximum size of a disk depends on the
+ configuration of your VM. See
+ [Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds)
+ or
+ [Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds)
+ for more information.
+
+    For VHD builds the final artifacts will be named
+    `PREFIX-dataDisk-<n>.UUID.vhd` and stored in the specified capture
+ container along side the OS disk. The additional disks are included in
+ the deployment template `PREFIX-vmTemplate.UUID`.
+
+ For Managed build the final artifacts are included in the managed image.
+ The additional disk will have the same storage account type as the OS
+ disk, as specified with the `managed_image_storage_account_type`
+ setting.
+
+- `disk_caching_type` (string) - Specify the disk caching type. Valid values
+ are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
+
+- `async_resourcegroup_delete` (bool) - If you want packer to delete the
+    temporary resource group asynchronously, set this value. It's a boolean
+    value and defaults to false. Important: Setting this to true means that
+    your builds are faster, however any failed deletes are not reported.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/azure/arm/_Config-required.html.md b/website/source/partials/builder/azure/arm/_Config-required.html.md
new file mode 100644
index 000000000..696875dac
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_Config-required.html.md
@@ -0,0 +1,22 @@
+
+
+- `image_publisher` (string) - PublisherName for your base image. See
+ [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
+ for details.
+
+ CLI example `az vm image list-publishers --location westus`
+
+- `image_offer` (string) - Offer for your base image. See
+ [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
+ for details.
+
+ CLI example
+ `az vm image list-offers --location westus --publisher Canonical`
+
+- `image_sku` (string) - SKU for your base image. See
+ [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
+ for details.
+
+ CLI example
+ `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer`
+
\ No newline at end of file
diff --git a/website/source/partials/builder/azure/arm/_PlanInformation-not-required.html.md b/website/source/partials/builder/azure/arm/_PlanInformation-not-required.html.md
new file mode 100644
index 000000000..603e28349
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_PlanInformation-not-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `plan_name` (string) - Plan Name
+- `plan_product` (string) - Plan Product
+- `plan_publisher` (string) - Plan Publisher
+- `plan_promotion_code` (string) - Plan Promotion Code
\ No newline at end of file
diff --git a/website/source/partials/builder/azure/arm/_SharedImageGallery-not-required.html.md b/website/source/partials/builder/azure/arm/_SharedImageGallery-not-required.html.md
new file mode 100644
index 000000000..c36a6bc9c
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_SharedImageGallery-not-required.html.md
@@ -0,0 +1,12 @@
+
+
+- `subscription` (string) - Subscription
+- `resource_group` (string) - Resource Group
+- `gallery_name` (string) - Gallery Name
+- `image_name` (string) - Image Name
+- `image_version` (string) - Specify a specific version of an OS to boot from.
+ Defaults to latest. There may be a difference in versions available
+ across regions due to image synchronization latency. To ensure a consistent
+ version across regions set this value to one that is available in all
+ regions where you are deploying.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/cloudstack/_Config-not-required.html.md b/website/source/partials/builder/cloudstack/_Config-not-required.html.md
new file mode 100644
index 000000000..bb040f059
--- /dev/null
+++ b/website/source/partials/builder/cloudstack/_Config-not-required.html.md
@@ -0,0 +1,97 @@
+
+
+- `async_timeout` (time.Duration) - The time duration to wait for async calls to
+ finish. Defaults to 30m.
+
+- `http_get_only` (bool) - Some cloud providers only allow HTTP GET calls
+ to their CloudStack API. If using such a provider, you need to set this to
+ true in order for the provider to only make GET calls and no POST calls.
+
+- `ssl_no_verify` (bool) - Set to true to skip SSL verification.
+ Defaults to false.
+
+- `cidr_list` ([]string) - List of CIDR's that will have access to the new
+ instance. This is needed in order for any provisioners to be able to
+ connect to the instance. Defaults to [ "0.0.0.0/0" ]. Only required when
+ use_local_ip_address is false.
+
+- `create_security_group` (bool) - If true a temporary security group
+ will be created which allows traffic towards the instance from the
+ cidr_list. This option will be ignored if security_groups is also
+ defined. Requires expunge set to true. Defaults to false.
+
+- `disk_offering` (string) - The name or ID of the disk offering used for the
+ instance. This option is only available (and also required) when using
+ source_iso.
+
+- `disk_size` (int64) - The size (in GB) of the root disk of the new
+ instance. This option is only available when using source_template.
+
+- `expunge` (bool) - Set to true to expunge the instance when it is
+ destroyed. Defaults to false.
+
+- `hypervisor` (string) - The target hypervisor (e.g. XenServer, KVM) for
+ the new template. This option is required when using source_iso.
+
+- `instance_name` (string) - The name of the instance. Defaults to
+ "packer-UUID" where UUID is dynamically generated.
+
+- `project` (string) - The name or ID of the project to deploy the instance
+ to.
+
+- `public_ip_address` (string) - The public IP address or its ID used for
+    connecting any provisioners to. If not provided, a temporary public IP
+    address will be associated and released during the Packer run.
+
+- `public_port` (int) - The fixed port you want to configure in the port
+    forwarding rule. Set this attribute if you do not want to use a random
+    public port.
+
+- `security_groups` ([]string) - A list of security group IDs or
+ names to associate the instance with.
+
+- `prevent_firewall_changes` (bool) - Set to true to prevent network
+ ACLs or firewall rules creation. Defaults to false.
+
+- `temporary_keypair_name` (string) - The name of the temporary SSH key pair
+    to generate. By default, Packer generates a name that looks like
+    packer_&lt;UUID&gt;, where &lt;UUID&gt; is a 36 character unique identifier.
+
+- `use_local_ip_address` (bool) - Set to true to indicate that the
+ provisioners should connect to the local IP address of the instance.
+
+- `user_data` (string) - User data to launch with the instance. This is a
+    template engine; see User Data below for
+    more details. Packer will not automatically wait for a user script to
+    finish before shutting down the instance; this must be handled in a
+    provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+    data when launching the instance. This file will be parsed as a template
+    engine; see User Data below for more
+    details.
+
+- `template_name` (string) - The name of the new template. Defaults to
+ "packer-{{timestamp}}" where timestamp will be the current time.
+
+- `template_display_text` (string) - The display text of the new template.
+ Defaults to the template_name.
+
+- `template_featured` (bool) - Set to true to indicate that the template
+ is featured. Defaults to false.
+
+- `template_public` (bool) - Set to true to indicate that the template
+ is available for all accounts. Defaults to false.
+
+- `template_password_enabled` (bool) - Set to true to indicate the
+ template should be password enabled. Defaults to false.
+
+- `template_requires_hvm` (bool) - Set to true to indicate the template
+ requires hardware-assisted virtualization. Defaults to false.
+
+- `template_scalable` (bool) - Set to true to indicate that the template
+ contains tools to support dynamic scaling of VM cpu/memory. Defaults to
+ false.
+
+- `template_tag` (string) - Template Tag
+- `tags` (map[string]string) - Tags
\ No newline at end of file
diff --git a/website/source/partials/builder/cloudstack/_Config-required.html.md b/website/source/partials/builder/cloudstack/_Config-required.html.md
new file mode 100644
index 000000000..51076ee1e
--- /dev/null
+++ b/website/source/partials/builder/cloudstack/_Config-required.html.md
@@ -0,0 +1,33 @@
+
+
+- `api_url` (string) - The CloudStack API endpoint we will connect to. It can
+ also be specified via environment variable CLOUDSTACK_API_URL, if set.
+
+- `api_key` (string) - The API key used to sign all API requests. It can also
+ be specified via environment variable CLOUDSTACK_API_KEY, if set.
+
+- `secret_key` (string) - The secret key used to sign all API requests. It
+ can also be specified via environment variable CLOUDSTACK_SECRET_KEY, if
+ set.
+
+- `network` (string) - The name or ID of the network to connect the instance
+ to.
+
+- `service_offering` (string) - The name or ID of the service offering used
+ for the instance.
+
+- `source_iso` (string) - The name or ID of an ISO that will be mounted
+ before booting the instance. This option is mutually exclusive with
+ source_template. When using source_iso, both disk_offering and
+ hypervisor are required.
+
+- `source_template` (string) - The name or ID of the template used as base
+ template for the instance. This option is mutually exclusive with
+ source_iso.
+
+- `zone` (string) - The name or ID of the zone where the instance will be
+ created.
+
+- `template_os` (string) - The name or ID of the template OS for the new
+ template that will be created.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/digitalocean/_Config-not-required.html.md b/website/source/partials/builder/digitalocean/_Config-not-required.html.md
new file mode 100644
index 000000000..f80d2a215
--- /dev/null
+++ b/website/source/partials/builder/digitalocean/_Config-not-required.html.md
@@ -0,0 +1,38 @@
+
+
+- `api_url` (string) - Non-standard API endpoint URL. Set this if you are
+    using a DigitalOcean API compatible service. It can also be specified via
+    environment variable DIGITALOCEAN_API_URL.
+
+- `private_networking` (bool) - Set to true to enable private networking
+ for the droplet being created. This defaults to false, or not enabled.
+
+- `monitoring` (bool) - Set to true to enable monitoring for the droplet
+ being created. This defaults to false, or not enabled.
+
+- `ipv6` (bool) - Set to true to enable ipv6 for the droplet being
+ created. This defaults to false, or not enabled.
+
+- `snapshot_name` (string) - The name of the resulting snapshot that will
+ appear in your account. Defaults to "packer-{{timestamp}}" (see
+ configuration templates for more info).
+
+- `snapshot_regions` ([]string) - The regions of the resulting
+ snapshot that will appear in your account.
+
+- `state_timeout` (time.Duration) - The time to wait, as a duration string, for a
+ droplet to enter a desired state (such as "active") before timing out. The
+ default state timeout is "6m".
+
+- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean
+ sets the hostname of the machine to this value.
+
+- `user_data` (string) - User data to launch with the Droplet. Packer will
+    not automatically wait for a user script to finish before shutting down the
+    instance; this must be handled in a provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+ data when launching the Droplet.
+
+- `tags` ([]string) - Tags to apply to the droplet when it is created
+
\ No newline at end of file
diff --git a/website/source/partials/builder/digitalocean/_Config-required.html.md b/website/source/partials/builder/digitalocean/_Config-required.html.md
new file mode 100644
index 000000000..88c758b67
--- /dev/null
+++ b/website/source/partials/builder/digitalocean/_Config-required.html.md
@@ -0,0 +1,21 @@
+
+
+- `api_token` (string) - The client TOKEN to use to access your account. It
+ can also be specified via environment variable DIGITALOCEAN_API_TOKEN, if
+ set.
+
+- `region` (string) - The name (or slug) of the region to launch the droplet
+ in. Consequently, this is the region where the snapshot will be available.
+ See
+ https://developers.digitalocean.com/documentation/v2/#list-all-regions
+ for the accepted region names/slugs.
+
+- `size` (string) - The name (or slug) of the droplet size to use. See
+ https://developers.digitalocean.com/documentation/v2/#list-all-sizes
+ for the accepted size names/slugs.
+
+- `image` (string) - The name (or slug) of the base image to use. This is the
+ image that will be used to launch a new droplet and provision it. See
+ https://developers.digitalocean.com/documentation/v2/#list-all-images
+ for details on how to get a list of the accepted image names/slugs.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/docker/_AwsAccessConfig-not-required.html.md b/website/source/partials/builder/docker/_AwsAccessConfig-not-required.html.md
new file mode 100644
index 000000000..a07f57845
--- /dev/null
+++ b/website/source/partials/builder/docker/_AwsAccessConfig-not-required.html.md
@@ -0,0 +1,19 @@
+
+
+- `aws_access_key` (string) - The AWS access key used to communicate with
+ AWS. Learn how to set
+ this.
+
+- `aws_secret_key` (string) - The AWS secret key used to communicate with
+ AWS. Learn how to set
+ this.
+
+- `aws_token` (string) - The AWS access token to use. This is different from
+ the access key and secret key. If you're not sure what this is, then you
+ probably don't need it. This will also be read from the AWS_SESSION_TOKEN
+ environmental variable.
+
+- `aws_profile` (string) - The AWS shared credentials profile used to
+ communicate with AWS. Learn how to set
+ this.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/docker/_Config-not-required.html.md b/website/source/partials/builder/docker/_Config-not-required.html.md
new file mode 100644
index 000000000..1c79159c6
--- /dev/null
+++ b/website/source/partials/builder/docker/_Config-not-required.html.md
@@ -0,0 +1,62 @@
+
+
+- `author` (string) - Set the author (e-mail) of a commit.
+
+- `changes` ([]string) - Dockerfile instructions to add to the commit. Example of instructions
+ are CMD, ENTRYPOINT, ENV, and EXPOSE. Example: [ "USER ubuntu", "WORKDIR
+ /app", "EXPOSE 8080" ]
+
+- `container_dir` (string) - The directory inside container to mount temp directory from host server
+ for work file provisioner. This defaults to c:/packer-files on windows
+ and /packer-files on other systems.
+
+- `exec_user` (string) - Username (UID) to run remote commands with. You can also set the group
+ name/ID if you want: (UID or UID:GID). You may need this if you get
+ permission errors trying to run the shell or other provisioners.
+
+- `privileged` (bool) - If true, run the docker container with the `--privileged` flag. This
+ defaults to false if not set.
+
+- `pull` (bool) - If true, the configured image will be pulled using `docker pull` prior
+ to use. Otherwise, it is assumed the image already exists and can be
+ used. This defaults to true if not set.
+
+- `run_command` ([]string) - An array of arguments to pass to docker run in order to run the
+ container. By default this is set to ["-d", "-i", "-t",
+ "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are using a linux
+ container, and ["-d", "-i", "-t", "--entrypoint=powershell", "--",
+ "{{.Image}}"] if you are running a windows container. {{.Image}} is a
+ template variable that corresponds to the image template option. Passing
+ the entrypoint option this way will make it the default entrypoint of
+ the resulting image, so running docker run -it --rm will start the
+ docker image from the /bin/sh shell interpreter; you could run a script
+ or another shell by running docker run -it --rm -c /bin/bash. If your
+ docker image embeds a binary intended to be run often, you should
+ consider changing the default entrypoint to point to it.
+
+- `volumes` (map[string]string) - A mapping of additional volumes to mount into this container. The key of
+ the object is the host path, the value is the container path.
+
+- `fix_upload_owner` (bool) - If true, files uploaded to the container will be owned by the user the
+ container is running as. If false, the owner will depend on the version
+ of docker installed in the system. Defaults to true.
+
+- `windows_container` (bool) - If "true", tells Packer that you are building a Windows container
+ running on a windows host. This is necessary for building Windows
+ containers, because our normal docker bindings do not work for them.
+
+- `login` (bool) - This is used to login to dockerhub to pull a private base container. For
+ pushing to dockerhub, see the docker post-processors
+
+- `login_password` (string) - The password to use to authenticate to login.
+
+- `login_server` (string) - The server address to login to.
+
+- `login_username` (string) - The username to use to authenticate to login.
+
+- `ecr_login` (bool) - Defaults to false. If true, the builder will login in order to pull the
+ image from Amazon EC2 Container Registry (ECR). The builder only logs in
+ for the duration of the pull. If true login_server is required and
+ login, login_username, and login_password will be ignored. For more
+ information see the section on ECR.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/docker/_Config-required.html.md b/website/source/partials/builder/docker/_Config-required.html.md
new file mode 100644
index 000000000..a293f498f
--- /dev/null
+++ b/website/source/partials/builder/docker/_Config-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `commit` (bool) - If true, the container will be committed to an image rather than exported.
+
+- `discard` (bool) - Throw away the container when the build is complete. This is useful for
+ the [artifice
+ post-processor](https://www.packer.io/docs/post-processors/artifice.html).
+
+- `export_path` (string) - The path where the final container will be exported as a tar file.
+
+- `image` (string) - The base image for the Docker container that will be started. This image
+ will be pulled from the Docker registry if it doesn't already exist.
+
+- `message` (string) - Set a message for the commit.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/googlecompute/_Config-not-required.html.md b/website/source/partials/builder/googlecompute/_Config-not-required.html.md
new file mode 100644
index 000000000..b1d5ccfe8
--- /dev/null
+++ b/website/source/partials/builder/googlecompute/_Config-not-required.html.md
@@ -0,0 +1,119 @@
+
+
+- `account_file` (string) - The JSON file containing your account
+ credentials. Not required if you run Packer on a GCE instance with a
+ service account. Instructions for creating the file or using service
+ accounts are above.
+
+- `accelerator_type` (string) - Full or partial URL of the guest accelerator
+ type. GPU accelerators can only be used with
+ "on_host_maintenance": "TERMINATE" option set. Example:
+ "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
+
+- `accelerator_count` (int64) - Number of guest accelerator cards to add to
+ the launched instance.
+
+- `address` (string) - The name of a pre-allocated static external IP
+ address. Note, must be the name and not the actual IP address.
+
+- `disable_default_service_account` (bool) - If true, the default service
+ account will not be used if service_account_email is not specified. Set
+ this value to true and omit service_account_email to provision a VM with
+ no service account.
+
+- `disk_name` (string) - The name of the disk, if unset the instance name
+ will be used.
+
+- `disk_size` (int64) - The size of the disk in GB. This defaults to 10,
+ which is 10GB.
+
+- `disk_type` (string) - Type of disk used to back your instance, like
+ pd-ssd or pd-standard. Defaults to pd-standard.
+
+- `image_name` (string) - The unique name of the resulting image. Defaults to
+ "packer-{{timestamp}}".
+
+- `image_description` (string) - The description of the resulting image.
+
+- `image_encryption_key` (*compute.CustomerEncryptionKey) - Image encryption key to apply to the created image. Possible values:
+
+- `image_family` (string) - The name of the image family to which the
+ resulting image belongs. You can create disks by specifying an image family
+ instead of a specific image name. The image family always returns its
+ latest image that is not deprecated.
+
+- `image_labels` (map[string]string) - Key/value pair labels to
+ apply to the created image.
+
+- `image_licenses` ([]string) - Licenses to apply to the created
+ image.
+
+- `instance_name` (string) - A name to give the launched instance. Beware
+ that this must be unique. Defaults to "packer-{{uuid}}".
+
+- `labels` (map[string]string) - Key/value pair labels to apply to
+ the launched instance.
+
+- `machine_type` (string) - The machine type. Defaults to "n1-standard-1".
+
+- `metadata` (map[string]string) - Metadata applied to the launched
+ instance.
+
+- `min_cpu_platform` (string) - A Minimum CPU Platform for VM Instance.
+ Availability and default CPU platforms vary across zones, based on the
+ hardware available in each GCP zone.
+ Details
+
+- `network` (string) - The Google Compute network id or URL to use for the
+ launched instance. Defaults to "default". If the value is not a URL, it
+ will be interpolated to
+ projects/((network_project_id))/global/networks/((network)). This value
+ is not required if a subnet is specified.
+
+- `network_project_id` (string) - The project ID for the network and
+ subnetwork to use for launched instance. Defaults to project_id.
+
+- `omit_external_ip` (bool) - If true, the instance will not have an
+ external IP. use_internal_ip must be true if this property is true.
+
+- `on_host_maintenance` (string) - Sets Host Maintenance Option. Valid
+ choices are MIGRATE and TERMINATE. Please see GCE Instance Scheduling
+ Options,
+ as not all machine_types support MIGRATE (i.e. machines with GPUs). If
+ preemptible is true this can only be TERMINATE. If preemptible is false,
+ it defaults to MIGRATE
+
+- `preemptible` (bool) - If true, launch a preemptible instance.
+
+- `state_timeout` (string) - The time to wait for instance state changes.
+ Defaults to "5m".
+
+- `region` (string) - The region in which to launch the instance. Defaults to
+ the region hosting the specified zone.
+
+- `scopes` ([]string) - The service account scopes for launched
+ instance. Defaults to:
+
+- `service_account_email` (string) - The service account to be used for
+ launched instance. Defaults to the project's default service account unless
+ disable_default_service_account is true.
+
+- `source_image_project_id` (string) - The project ID of the project
+ containing the source image.
+
+- `startup_script_file` (string) - The path to a startup script to run on the
+ VM from which the image will be made.
+
+- `subnetwork` (string) - The Google Compute subnetwork id or URL to use for
+ the launched instance. Only required if the network has been created with
+ custom subnetting. Note, the region of the subnetwork must match the
+ region or zone in which the VM is launched. If the value is not a URL,
+ it will be interpolated to
+ projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))
+
+- `tags` ([]string) - Assign network tags to apply firewall rules to
+ VM instance.
+
+- `use_internal_ip` (bool) - If true, use the instance's internal IP
+ instead of its external IP during building.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/googlecompute/_Config-required.html.md b/website/source/partials/builder/googlecompute/_Config-required.html.md
new file mode 100644
index 000000000..c2a1213a4
--- /dev/null
+++ b/website/source/partials/builder/googlecompute/_Config-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `project_id` (string) - The project ID that will be used to launch
+ instances and store images.
+
+- `source_image` (string) - The source image to use to create the new image
+ from. You can also specify source_image_family instead. If both
+ source_image and source_image_family are specified, source_image
+ takes precedence. Example: "debian-8-jessie-v20161027"
+
+- `source_image_family` (string) - The source image family to use to create
+ the new image from. The image family always returns its latest image that
+ is not deprecated. Example: "debian-8".
+
+- `zone` (string) - The zone in which to launch the instance used to create
+ the image. Example: "us-central1-a"
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperone/_Config-not-required.html.md b/website/source/partials/builder/hyperone/_Config-not-required.html.md
new file mode 100644
index 000000000..a392fed18
--- /dev/null
+++ b/website/source/partials/builder/hyperone/_Config-not-required.html.md
@@ -0,0 +1,61 @@
+
+
+- `api_url` (string) - Custom API endpoint URL, compatible with HyperOne.
+ It can also be specified via environment variable HYPERONE_API_URL.
+
+- `token_login` (string) - Login (an e-mail) on HyperOne platform. Set this
+ if you want to fetch the token by SSH authentication.
+
+- `state_timeout` (time.Duration) - Timeout for waiting on the API to complete
+ a request. Defaults to 5m.
+
+- `image_name` (string) - The name of the resulting image. Defaults to
+ "packer-{{timestamp}}"
+ (see configuration templates for more info).
+
+- `image_description` (string) - The description of the resulting image.
+
+- `image_tags` (map[string]interface{}) - Key/value pair tags to
+ add to the created image.
+
+- `image_service` (string) - The service of the resulting image.
+
+- `vm_name` (string) - The name of the created server.
+
+- `vm_tags` (map[string]interface{}) - Key/value pair tags to
+ add to the created server.
+
+- `disk_name` (string) - The name of the created disk.
+
+- `disk_type` (string) - The type of the created disk. Defaults to ssd.
+
+- `network` (string) - The ID of the network to attach to the created server.
+
+- `private_ip` (string) - The ID of the private IP within chosen network
+ that should be assigned to the created server.
+
+- `public_ip` (string) - The ID of the public IP that should be assigned to
+ the created server. If network is chosen, the public IP will be associated
+ with server's private IP.
+
+- `public_netadp_service` (string) - Custom service of public network adapter.
+ Can be useful when using custom api_url. Defaults to public.
+
+- `chroot_disk` (bool) - Chroot Disk
+- `chroot_disk_size` (float32) - Chroot Disk Size
+- `chroot_disk_type` (string) - Chroot Disk Type
+- `chroot_mount_path` (string) - Chroot Mount Path
+- `chroot_mounts` ([][]string) - Chroot Mounts
+- `chroot_copy_files` ([]string) - Chroot Copy Files
+- `chroot_command_wrapper` (string) - Chroot Command Wrapper
+- `mount_options` ([]string) - Mount Options
+- `mount_partition` (string) - Mount Partition
+- `pre_mount_commands` ([]string) - Pre Mount Commands
+- `post_mount_commands` ([]string) - Post Mount Commands
+- `ssh_keys` ([]string) - List of SSH keys by name or id to be added
+ to the server on launch.
+
+- `user_data` (string) - User data to launch with the server. Packer will not
+ automatically wait for a user script to finish before shutting down the
+ instance, this must be handled in a provisioner.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperone/_Config-required.html.md b/website/source/partials/builder/hyperone/_Config-required.html.md
new file mode 100644
index 000000000..f5c6f4c2d
--- /dev/null
+++ b/website/source/partials/builder/hyperone/_Config-required.html.md
@@ -0,0 +1,16 @@
+
+
+- `token` (string) - The authentication token used to access your account.
+ This can be either a session token or a service account token.
+ If not defined, the builder will attempt to find it in the following order:
+
+- `project` (string) - The id or name of the project. This field is required
+ only if using session tokens. It should be skipped when using service
+ account authentication.
+
+- `source_image` (string) - ID or name of the image to launch server from.
+
+- `vm_type` (string) - ID or name of the type this server should be created with.
+
+- `disk_size` (float32) - Size of the created disk, in GiB.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperv/common/_OutputConfig-not-required.html.md b/website/source/partials/builder/hyperv/common/_OutputConfig-not-required.html.md
new file mode 100644
index 000000000..89654a159
--- /dev/null
+++ b/website/source/partials/builder/hyperv/common/_OutputConfig-not-required.html.md
@@ -0,0 +1,10 @@
+
+
+- `output_directory` (string) - This setting specifies the directory that
+ artifacts from the build, such as the virtual machine files and disks,
+ will be output to. The path to the directory may be relative or
+ absolute. If relative, the path is relative to the working directory
+ packer is executed from. This directory must not exist or, if
+ created, must be empty prior to running the builder. By default this is
+ "output-BUILDNAME" where "BUILDNAME" is the name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperv/common/_ShutdownConfig-not-required.html.md b/website/source/partials/builder/hyperv/common/_ShutdownConfig-not-required.html.md
new file mode 100644
index 000000000..89d00bcdc
--- /dev/null
+++ b/website/source/partials/builder/hyperv/common/_ShutdownConfig-not-required.html.md
@@ -0,0 +1,16 @@
+
+
+- `shutdown_command` (string) - The command to use to gracefully shut down
+ the machine once all provisioning is complete. By default this is an
+ empty string, which tells Packer to just forcefully shut down the
+ machine. This setting can be safely omitted if for example, a shutdown
+ command to gracefully halt the machine is configured inside a
+ provisioning script. If one or more scripts require a reboot it is
+ suggested to leave this blank (since reboots may fail) and instead
+ specify the final shutdown command in your last script.
+
+- `shutdown_timeout` (string) - The amount of time to wait after executing
+ the shutdown_command for the virtual machine to actually shut down.
+ If the machine doesn't shut down in this time it is considered an
+ error. By default, the time out is "5m" (five minutes).
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperv/iso/_Config-not-required.html.md b/website/source/partials/builder/hyperv/iso/_Config-not-required.html.md
new file mode 100644
index 000000000..6d2288478
--- /dev/null
+++ b/website/source/partials/builder/hyperv/iso/_Config-not-required.html.md
@@ -0,0 +1,128 @@
+
+
+- `disk_size` (uint) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40 GB.
+
+- `disk_block_size` (uint) - The block size of the VHD to be created.
+ Recommended disk block size for Linux hyper-v guests is 1 MiB. This
+ defaults to "32 MiB".
+
+- `memory` (uint) - The amount, in megabytes, of RAM to assign to the
+ VM. By default, this is 1 GB.
+
+- `secondary_iso_images` ([]string) - A list of ISO paths to
+ attach to a VM when it is booted. This is most useful for unattended
+ Windows installs, which look for an Autounattend.xml file on removable
+ media. By default, no secondary ISO will be attached.
+
+- `guest_additions_mode` (string) - If set to attach then attach and
+ mount the ISO image specified in guest_additions_path. If set to
+   none then guest additions are not attached and mounted. This is the
+ default.
+
+- `guest_additions_path` (string) - The path to the ISO image for guest
+ additions.
+
+- `vm_name` (string) - This is the name of the new virtual machine,
+ without the file extension. By default this is "packer-BUILDNAME",
+ where "BUILDNAME" is the name of the build.
+
+- `switch_name` (string) - The name of the switch to connect the virtual
+ machine to. By default, leaving this value unset will cause Packer to
+ try and determine the switch to use by looking for an external switch
+ that is up and running.
+
+- `switch_vlan_id` (string) - This is the VLAN of the virtual switch's
+ network card. By default none is set. If none is set then a VLAN is not
+ set on the switch's network card. If this value is set it should match
+ the VLAN specified in by vlan_id.
+
+- `mac_address` (string) - This allows a specific MAC address to be used on
+ the default virtual network card. The MAC address must be a string with
+ no delimiters, for example "0000deadbeef".
+
+- `vlan_id` (string) - This is the VLAN of the virtual machine's network
+ card for the new virtual machine. By default none is set. If none is set
+ then VLANs are not set on the virtual machine's network card.
+
+- `cpus` (uint) - The number of CPUs the virtual machine should use. If
+ this isn't specified, the default is 1 CPU.
+
+- `generation` (uint) - The Hyper-V generation for the virtual machine. By
+ default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ floppy drives. In this scenario use secondary_iso_images instead. Hard
+ drives and DVD drives will also be SCSI and not IDE.
+
+- `enable_mac_spoofing` (bool) - If true enable MAC address spoofing
+ for the virtual machine. This defaults to false.
+
+- `use_legacy_network_adapter` (bool) - If true use a legacy network adapter as the NIC.
+ This defaults to false. A legacy network adapter is fully emulated NIC, and is thus
+ supported by various exotic operating systems, but this emulation requires
+ additional overhead and should only be used if absolutely necessary.
+
+- `enable_dynamic_memory` (bool) - If true enable dynamic memory for
+ the virtual machine. This defaults to false.
+
+- `enable_secure_boot` (bool) - If true enable secure boot for the
+ virtual machine. This defaults to false. See secure_boot_template
+ below for additional settings.
+
+- `secure_boot_template` (string) - The secure boot template to be
+ configured. Valid values are "MicrosoftWindows" (Windows) or
+ "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+
+- `enable_virtualization_extensions` (bool) - If true enable
+ virtualization extensions for the virtual machine. This defaults to
+ false. For nested virtualization you need to enable MAC spoofing,
+ disable dynamic memory and have at least 4GB of RAM assigned to the
+ virtual machine.
+
+- `temp_path` (string) - The location under which Packer will create a
+ directory to house all the VM files and folders during the build.
+ By default %TEMP% is used which, for most systems, will evaluate to
+ %USERPROFILE%/AppData/Local/Temp.
+
+- `configuration_version` (string) - This allows you to set the vm version when
+ calling New-VM to generate the vm.
+
+- `keep_registered` (bool) - If "true", Packer will not delete the VM from
+   the Hyper-V manager.
+
+- `communicator` (string) - Communicator
+- `disk_additional_size` ([]uint) - The size or sizes of any
+ additional hard disks for the VM in megabytes. If this is not specified
+ then the VM will only contain a primary hard disk. Additional drives
+ will be attached to the SCSI interface only. The builder uses
+ expandable rather than fixed-size virtual hard disks, so the actual
+ file representing the disk will not use the full size unless it is
+ full.
+
+- `skip_compaction` (bool) - If true skip compacting the hard disk for
+ the virtual machine when exporting. This defaults to false.
+
+- `skip_export` (bool) - If true Packer will skip the export of the VM.
+ If you are interested only in the VHD/VHDX files, you can enable this
+ option. The resulting VHD/VHDX file will be output to
+ /Virtual Hard Disks. By default this option is false
+ and Packer will export the VM to output_directory.
+
+- `differencing_disk` (bool) - If true enables differencing disks. Only
+ the changes will be written to the new disk. This is especially useful if
+ your source is a VHD/VHDX. This defaults to false.
+
+- `use_fixed_vhd_format` (bool) - If true, creates the boot disk on the
+ virtual machine as a fixed VHD format disk. The default is false, which
+ creates a dynamic VHDX format disk. This option requires setting
+ generation to 1, skip_compaction to true, and
+ differencing_disk to false. Additionally, any value entered for
+ disk_block_size will be ignored. The most likely use case for this
+   option is outputting a disk that is in the format required for upload to
+ Azure.
+
+- `headless` (bool) - Packer defaults to building Hyper-V virtual
+ machines by launching a GUI that shows the console of the machine being
+ built. When this value is set to true, the machine will start without a
+ console.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/hyperv/vmcx/_Config-not-required.html.md b/website/source/partials/builder/hyperv/vmcx/_Config-not-required.html.md
new file mode 100644
index 000000000..42fbfaad2
--- /dev/null
+++ b/website/source/partials/builder/hyperv/vmcx/_Config-not-required.html.md
@@ -0,0 +1,117 @@
+
+
+- `memory` (uint) - The amount, in megabytes, of RAM to assign to the
+ VM. By default, this is 1 GB.
+
+- `secondary_iso_images` ([]string) - A list of ISO paths to
+ attach to a VM when it is booted. This is most useful for unattended
+ Windows installs, which look for an Autounattend.xml file on removable
+ media. By default, no secondary ISO will be attached.
+
+- `guest_additions_mode` (string) - If set to attach then attach and
+ mount the ISO image specified in guest_additions_path. If set to
+   none then guest additions are not attached and mounted. This is the
+ default.
+
+- `guest_additions_path` (string) - The path to the ISO image for guest
+ additions.
+
+- `clone_from_vmcx_path` (string) - This is the path to a directory containing an exported virtual machine.
+
+- `clone_from_vm_name` (string) - This is the name of the virtual machine to clone from.
+
+- `clone_from_snapshot_name` (string) - The name of a snapshot in the
+ source machine to use as a starting point for the clone. If the value
+ given is an empty string, the last snapshot present in the source will
+ be chosen as the starting point for the new VM.
+
+- `clone_all_snapshots` (bool) - If set to true all snapshots
+ present in the source machine will be copied when the machine is
+ cloned. The final result of the build will be an exported virtual
+ machine that contains all the snapshots of the parent.
+
+- `vm_name` (string) - This is the name of the new virtual machine,
+ without the file extension. By default this is "packer-BUILDNAME",
+ where "BUILDNAME" is the name of the build.
+
+- `differencing_disk` (bool) - If true enables differencing disks. Only
+ the changes will be written to the new disk. This is especially useful if
+ your source is a VHD/VHDX. This defaults to false.
+
+- `switch_name` (string) - The name of the switch to connect the virtual
+ machine to. By default, leaving this value unset will cause Packer to
+ try and determine the switch to use by looking for an external switch
+ that is up and running.
+
+- `copy_in_compare` (bool) - When cloning a vm to build from, we run a powershell
+ Compare-VM command, which, depending on your version of Windows, may need
+ the "Copy" flag to be set to true or false. Defaults to "false". Command:
+
+- `switch_vlan_id` (string) - This is the VLAN of the virtual switch's
+ network card. By default none is set. If none is set then a VLAN is not
+ set on the switch's network card. If this value is set it should match
+ the VLAN specified in by vlan_id.
+
+- `mac_address` (string) - This allows a specific MAC address to be used on
+ the default virtual network card. The MAC address must be a string with
+ no delimiters, for example "0000deadbeef".
+
+- `vlan_id` (string) - This is the VLAN of the virtual machine's network
+ card for the new virtual machine. By default none is set. If none is set
+ then VLANs are not set on the virtual machine's network card.
+
+- `cpus` (uint) - The number of CPUs the virtual machine should use. If
+ this isn't specified, the default is 1 CPU.
+
+- `generation` (uint) - The Hyper-V generation for the virtual machine. By
+ default, this is 1. Generation 2 Hyper-V virtual machines do not support
+ floppy drives. In this scenario use secondary_iso_images instead. Hard
+ drives and DVD drives will also be SCSI and not IDE.
+
+- `enable_mac_spoofing` (bool) - If true enable MAC address spoofing
+ for the virtual machine. This defaults to false.
+
+- `enable_dynamic_memory` (bool) - If true enable dynamic memory for
+ the virtual machine. This defaults to false.
+
+- `enable_secure_boot` (bool) - If true enable secure boot for the
+ virtual machine. This defaults to false. See secure_boot_template
+ below for additional settings.
+
+- `secure_boot_template` (string) - The secure boot template to be
+ configured. Valid values are "MicrosoftWindows" (Windows) or
+ "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if
+ enable_secure_boot is set to "true". This defaults to "MicrosoftWindows".
+
+- `enable_virtualization_extensions` (bool) - If true enable
+ virtualization extensions for the virtual machine. This defaults to
+ false. For nested virtualization you need to enable MAC spoofing,
+ disable dynamic memory and have at least 4GB of RAM assigned to the
+ virtual machine.
+
+- `temp_path` (string) - The location under which Packer will create a
+ directory to house all the VM files and folders during the build.
+ By default %TEMP% is used which, for most systems, will evaluate to
+ %USERPROFILE%/AppData/Local/Temp.
+
+- `configuration_version` (string) - This allows you to set the vm version when
+ calling New-VM to generate the vm.
+
+- `keep_registered` (bool) - If "true", Packer will not delete the VM from
+   the Hyper-V manager.
+
+- `communicator` (string) - Communicator
+- `skip_compaction` (bool) - If true skip compacting the hard disk for
+ the virtual machine when exporting. This defaults to false.
+
+- `skip_export` (bool) - If true Packer will skip the export of the VM.
+ If you are interested only in the VHD/VHDX files, you can enable this
+ option. The resulting VHD/VHDX file will be output to
+ /Virtual Hard Disks. By default this option is false
+ and Packer will export the VM to output_directory.
+
+- `headless` (bool) - Packer defaults to building Hyper-V virtual
+ machines by launching a GUI that shows the console of the machine being
+ built. When this value is set to true, the machine will start without a
+ console.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/lxc/_Config-not-required.html.md b/website/source/partials/builder/lxc/_Config-not-required.html.md
new file mode 100644
index 000000000..d54f4a36d
--- /dev/null
+++ b/website/source/partials/builder/lxc/_Config-not-required.html.md
@@ -0,0 +1,41 @@
+
+
+- `output_directory` (string) - The directory in which to save the exported
+ tar.gz. Defaults to output- in the current directory.
+
+- `container_name` (string) - The name of the LXC container. Usually stored
+ in /var/lib/lxc/containers/. Defaults to
+ packer-.
+
+- `command_wrapper` (string) - Allows you to specify a wrapper command, such
+ as ssh so you can execute packer builds on a remote host. Defaults to
+ Empty.
+
+- `init_timeout` (string) - The timeout in seconds to wait for the
+   container to start. Defaults to 20 seconds.
+
+- `create_options` ([]string) - Options to pass to lxc-create. For
+ instance, you can specify a custom LXC container configuration file with
+ ["-f", "/path/to/lxc.conf"]. Defaults to []. See man 1 lxc-create for
+ available options.
+
+- `start_options` ([]string) - Options to pass to lxc-start. For
+ instance, you can override parameters from the LXC container configuration
+ file via ["--define", "KEY=VALUE"]. Defaults to []. See
+ man 1 lxc-start for available options.
+
+- `attach_options` ([]string) - Options to pass to lxc-attach. For
+ instance, you can prevent the container from inheriting the host machine's
+ environment by specifying ["--clear-env"]. Defaults to []. See
+ man 1 lxc-attach for available options.
+
+- `template_parameters` ([]string) - Options to pass to the given
+ lxc-template command, usually located in
+ /usr/share/lxc/templates/lxc-. Note: This gets passed as
+ ARGV to the template command. Ensure you have an array of strings, as a
+ single string with spaces probably won't work. Defaults to [].
+
+- `target_runlevel` (int) - The minimum run level to wait for the
+ container to reach. Note some distributions (Ubuntu) simulate run levels
+ and may report 5 rather than 3.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/lxc/_Config-required.html.md b/website/source/partials/builder/lxc/_Config-required.html.md
new file mode 100644
index 000000000..46ef7e120
--- /dev/null
+++ b/website/source/partials/builder/lxc/_Config-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `config_file` (string) - The path to the lxc configuration file.
+
+- `template_name` (string) - The LXC template name to use.
+
+- `template_environment_vars` ([]string) - Environmental variables to
+ use to build the template with.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/lxd/_Config-not-required.html.md b/website/source/partials/builder/lxd/_Config-not-required.html.md
new file mode 100644
index 000000000..d0e837e73
--- /dev/null
+++ b/website/source/partials/builder/lxd/_Config-not-required.html.md
@@ -0,0 +1,22 @@
+
+
+- `output_image` (string) - The name of the output artifact. Defaults to
+ name.
+
+- `container_name` (string) - Container Name
+- `command_wrapper` (string) - Lets you prefix all builder commands, such as
+ with ssh for a remote build host. Defaults to "".
+
+- `profile` (string) - Profile
+- `init_sleep` (string) - The number of seconds to sleep between launching
+ the LXD instance and provisioning it; defaults to 3 seconds.
+
+- `publish_properties` (map[string]string) - Pass key values to the publish
+ step to be set as properties on the output image. This is most helpful to
+ set the description, but can be used to set anything needed. See
+ https://stgraber.org/2016/03/30/lxd-2-0-image-management-512/
+ for more properties.
+
+- `launch_config` (map[string]string) - List of key/value pairs you wish to
+ pass to lxc launch via --config. Defaults to empty.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/lxd/_Config-required.html.md b/website/source/partials/builder/lxd/_Config-required.html.md
new file mode 100644
index 000000000..c99958ad1
--- /dev/null
+++ b/website/source/partials/builder/lxd/_Config-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `image` (string) - The source image to use when creating the build
+ container. This can be a (local or remote) image (name or fingerprint).
+ E.G. my-base-image, ubuntu-daily:x, 08fababf6f27, ...
+
\ No newline at end of file
diff --git a/website/source/partials/builder/ncloud/_Config-not-required.html.md b/website/source/partials/builder/ncloud/_Config-not-required.html.md
new file mode 100644
index 000000000..b0e934442
--- /dev/null
+++ b/website/source/partials/builder/ncloud/_Config-not-required.html.md
@@ -0,0 +1,32 @@
+
+
+- `access_key` (string) - Access Key
+- `secret_key` (string) - Secret Key
+- `member_server_image_no` (string) - Previous image code. If there is an
+ image previously created, it can be used to create a new image.
+ (server_image_product_code is required if not specified)
+
+- `server_image_name` (string) - Name of an image to create.
+
+- `server_image_description` (string) - Description of an image to create.
+
+- `user_data` (string) - User data to apply when launching the instance. Note
+ that you need to be careful about escaping characters due to the templates
+ being JSON. It is often more convenient to use user_data_file, instead.
+ Packer will not automatically wait for a user script to finish before
+   shutting down the instance; this must be handled in a provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+ data when launching the instance.
+
+- `block_storage_size` (int) - You can add block storage ranging from 10
+ GB to 2000 GB, in increments of 10 GB.
+
+- `region` (string) - Name of the region where you want to create an image.
+ (default: Korea)
+
+- `access_control_group_configuration_no` (string) - This is used to allow
+ winrm access when you create a Windows server. An ACG that specifies an
+ access source (0.0.0.0/0) and allowed port (5985) must be created in
+ advance.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/ncloud/_Config-required.html.md b/website/source/partials/builder/ncloud/_Config-required.html.md
new file mode 100644
index 000000000..a30e0cf7d
--- /dev/null
+++ b/website/source/partials/builder/ncloud/_Config-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `server_image_product_code` (string) - Product code of an image to create.
+ (member_server_image_no is required if not specified)
+
+- `server_product_code` (string) - Product (spec) code to create.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_AccessConfig-not-required.html.md b/website/source/partials/builder/openstack/_AccessConfig-not-required.html.md
new file mode 100644
index 000000000..09caad53a
--- /dev/null
+++ b/website/source/partials/builder/openstack/_AccessConfig-not-required.html.md
@@ -0,0 +1,57 @@
+
+
+- `user_id` (string) - User ID
+- `tenant_id` (string) - The tenant ID or name to boot the
+ instance into. Some OpenStack installations require this. If not specified,
+ Packer will use the environment variable OS_TENANT_NAME or
+ OS_TENANT_ID, if set. Tenant is also called Project in later versions of
+ OpenStack.
+
+- `tenant_name` (string) - Tenant Name
+- `domain_id` (string) - Domain ID
+- `domain_name` (string) - The Domain name or ID you are
+ authenticating with. OpenStack installations require this if identity v3 is
+ used. Packer will use the environment variable OS_DOMAIN_NAME or
+ OS_DOMAIN_ID, if set.
+
+- `insecure` (bool) - Whether or not the connection to OpenStack can be
+ done over an insecure connection. By default this is false.
+
+- `region` (string) - The name of the region, such as "DFW", in which to
+ launch the server to create the image. If not specified, Packer will use
+ the environment variable OS_REGION_NAME, if set.
+
+- `endpoint_type` (string) - The endpoint type to use. Can be any of
+ "internal", "internalURL", "admin", "adminURL", "public", and "publicURL".
+ By default this is "public".
+
+- `cacert` (string) - Custom CA certificate file path. If omitted the
+ OS_CACERT environment variable can be used.
+
+- `cert` (string) - Client certificate file path for SSL client
+ authentication. If omitted the OS_CERT environment variable can be used.
+
+- `key` (string) - Client private key file path for SSL client
+ authentication. If omitted the OS_KEY environment variable can be used.
+
+- `token` (string) - the token (id) to use with token based authorization.
+ Packer will use the environment variable OS_TOKEN, if set.
+
+- `application_credential_name` (string) - The application credential name to
+ use with application credential based authorization. Packer will use the
+ environment variable OS_APPLICATION_CREDENTIAL_NAME, if set.
+
+- `application_credential_id` (string) - The application credential id to
+ use with application credential based authorization. Packer will use the
+ environment variable OS_APPLICATION_CREDENTIAL_ID, if set.
+
+- `application_credential_secret` (string) - The application credential secret
+ to use with application credential based authorization. Packer will use the
+ environment variable OS_APPLICATION_CREDENTIAL_SECRET, if set.
+
+- `cloud` (string) - An entry in a clouds.yaml file. See the OpenStack
+ os-client-config
+ documentation
+ for more information about clouds.yaml files. If omitted, the OS_CLOUD
+ environment variable is used.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_AccessConfig-required.html.md b/website/source/partials/builder/openstack/_AccessConfig-required.html.md
new file mode 100644
index 000000000..e4db20d5f
--- /dev/null
+++ b/website/source/partials/builder/openstack/_AccessConfig-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `username` (string) - The username or id used to connect to
+ the OpenStack service. If not specified, Packer will use the environment
+ variable OS_USERNAME or OS_USERID, if set. This is not required if
+ using access token or application credential instead of password, or if using
+ cloud.yaml.
+
+- `password` (string) - The password used to connect to the OpenStack
+ service. If not specified, Packer will use the environment variables
+ OS_PASSWORD, if set. This is not required if using access token or
+ application credential instead of password, or if using cloud.yaml.
+
+- `identity_endpoint` (string) - The URL to the OpenStack Identity service.
+ If not specified, Packer will use the environment variables OS_AUTH_URL,
+ if set. This is not required if using cloud.yaml.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_ImageConfig-not-required.html.md b/website/source/partials/builder/openstack/_ImageConfig-not-required.html.md
new file mode 100644
index 000000000..574f0f782
--- /dev/null
+++ b/website/source/partials/builder/openstack/_ImageConfig-not-required.html.md
@@ -0,0 +1,18 @@
+
+
+- `metadata` (map[string]string) - Glance metadata that will be
+ applied to the image.
+
+- `image_visibility` (imageservice.ImageVisibility) - One of "public", "private", "shared", or
+ "community".
+
+- `image_members` ([]string) - List of members to add to the image
+ after creation. An image member is usually a project (also called the
+ "tenant") with whom the image is shared.
+
+- `image_disk_format` (string) - Disk format of the resulting image. This
+ option works if use_blockstorage_volume is true.
+
+- `image_tags` ([]string) - List of tags to add to the image after
+ creation.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_ImageConfig-required.html.md b/website/source/partials/builder/openstack/_ImageConfig-required.html.md
new file mode 100644
index 000000000..880a58d48
--- /dev/null
+++ b/website/source/partials/builder/openstack/_ImageConfig-required.html.md
@@ -0,0 +1,4 @@
+
+
+- `image_name` (string) - The name of the resulting image.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_ImageFilter-not-required.html.md b/website/source/partials/builder/openstack/_ImageFilter-not-required.html.md
new file mode 100644
index 000000000..fdff0a3c9
--- /dev/null
+++ b/website/source/partials/builder/openstack/_ImageFilter-not-required.html.md
@@ -0,0 +1,11 @@
+
+
+- `filters` (ImageFilterOptions) - filters used to select a source_image.
+ NOTE: This will fail unless exactly one image is returned, or
+ most_recent is set to true. Of the filters described in
+ ImageService, the
+ following are valid:
+
+- `most_recent` (bool) - Selects the newest created image when true.
+ This is most useful for selecting a daily distro build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_ImageFilterOptions-not-required.html.md b/website/source/partials/builder/openstack/_ImageFilterOptions-not-required.html.md
new file mode 100644
index 000000000..74129b434
--- /dev/null
+++ b/website/source/partials/builder/openstack/_ImageFilterOptions-not-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `name` (string) - Name
+- `owner` (string) - Owner
+- `tags` ([]string) - Tags
+- `visibility` (string) - Visibility
+- `properties` (map[string]string) - Properties
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_RunConfig-not-required.html.md b/website/source/partials/builder/openstack/_RunConfig-not-required.html.md
new file mode 100644
index 000000000..3d2cd4ed6
--- /dev/null
+++ b/website/source/partials/builder/openstack/_RunConfig-not-required.html.md
@@ -0,0 +1,85 @@
+
+
+- `availability_zone` (string) - The availability zone to launch the server
+ in. If this isn't specified, the default enforced by your OpenStack cluster
+ will be used. This may be required for some OpenStack clusters.
+
+- `rackconnect_wait` (bool) - For rackspace, whether or not to wait for
+ Rackconnect to assign the machine an IP address before connecting via SSH.
+ Defaults to false.
+
+- `floating_ip_network` (string) - The ID or name of an external network that
+ can be used for creation of a new floating IP.
+
+- `floating_ip` (string) - A specific floating IP to assign to this instance.
+
+- `reuse_ips` (bool) - Whether or not to attempt to reuse existing
+ unassigned floating ips in the project before allocating a new one. Note
+ that it is not possible to safely do this concurrently, so if you are
+ running multiple openstack builds concurrently, or if other processes are
+ assigning and using floating IPs in the same openstack project while packer
+ is running, you should not set this to true. Defaults to false.
+
+- `security_groups` ([]string) - A list of security groups by name to
+ add to this instance.
+
+- `networks` ([]string) - A list of networks by UUID to attach to
+ this instance.
+
+- `ports` ([]string) - A list of ports by UUID to attach to this
+ instance.
+
+- `user_data` (string) - User data to apply when launching the instance. Note
+ that you need to be careful about escaping characters due to the templates
+ being JSON. It is often more convenient to use user_data_file, instead.
+ Packer will not automatically wait for a user script to finish before
+   shutting down the instance; this must be handled in a provisioner.
+
+- `user_data_file` (string) - Path to a file that will be used for the user
+ data when launching the instance.
+
+- `instance_name` (string) - Name that is applied to the server instance
+ created by Packer. If this isn't specified, the default is same as
+ image_name.
+
+- `instance_metadata` (map[string]string) - Metadata that is
+ applied to the server instance created by Packer. Also called server
+ properties in some documentation. The strings have a max size of 255 bytes
+ each.
+
+- `force_delete` (bool) - Whether to force the OpenStack instance to be
+ forcefully deleted. This is useful for environments that have
+ reclaim / soft deletion enabled. By default this is false.
+
+- `config_drive` (bool) - Whether or not nova should use ConfigDrive for
+ cloud-init metadata.
+
+- `floating_ip_pool` (string) - Deprecated use floating_ip_network
+ instead.
+
+- `use_blockstorage_volume` (bool) - Use Block Storage service volume for
+ the instance root volume instead of Compute service local volume (default).
+
+- `volume_name` (string) - Name of the Block Storage service volume. If this
+ isn't specified, random string will be used.
+
+- `volume_type` (string) - Type of the Block Storage service volume. If this
+ isn't specified, the default enforced by your OpenStack cluster will be
+ used.
+
+- `volume_size` (int) - Size of the Block Storage service volume in GB. If
+ this isn't specified, it is set to source image min disk value (if set) or
+ calculated from the source image bytes size. Note that in some cases this
+ needs to be specified, if use_blockstorage_volume is true.
+
+- `volume_availability_zone` (string) - Availability zone of the Block
+ Storage service volume. If omitted, Compute instance availability zone will
+ be used. If both of Compute instance and Block Storage volume availability
+ zones aren't specified, the default enforced by your OpenStack cluster will
+ be used.
+
+- `openstack_provider` (string) - Not really used, but here for BC
+
+- `use_floating_ip` (bool) - Deprecated use floating_ip or
+ floating_ip_pool instead.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/openstack/_RunConfig-required.html.md b/website/source/partials/builder/openstack/_RunConfig-required.html.md
new file mode 100644
index 000000000..6cb62ae93
--- /dev/null
+++ b/website/source/partials/builder/openstack/_RunConfig-required.html.md
@@ -0,0 +1,19 @@
+
+
+- `source_image` (string) - The ID or full URL to the base image to use. This
+ is the image that will be used to launch a new server and provision it.
+ Unless you specify completely custom SSH settings, the source image must
+ have cloud-init installed so that the keypair gets assigned properly.
+
+- `source_image_name` (string) - The name of the base image to use. This is
+ an alternative way of providing source_image and only either of them can
+ be specified.
+
+- `source_image_filter` (ImageFilter) - The search filters for determining the base
+ image to use. This is an alternative way of providing source_image and
+ only one of these methods can be used. source_image will override the
+ filters.
+
+- `flavor` (string) - The ID, name, or full URL for the desired flavor for
+ the server to be created.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_HWConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_HWConfig-not-required.html.md
new file mode 100644
index 000000000..4bf3f44c4
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_HWConfig-not-required.html.md
@@ -0,0 +1,14 @@
+
+
+- `cpus` (int) - The number of cpus to use for building the VM.
+ Defaults to 1.
+
+- `memory` (int) - The amount of memory to use for building the VM in
+ megabytes. Defaults to 512 megabytes.
+
+- `sound` (bool) - Specifies whether to enable the sound device when
+ building the VM. Defaults to false.
+
+- `usb` (bool) - Specifies whether to enable the USB bus when building
+ the VM. Defaults to false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_OutputConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_OutputConfig-not-required.html.md
new file mode 100644
index 000000000..6798f4470
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_OutputConfig-not-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when packer
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_PrlctlConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_PrlctlConfig-not-required.html.md
new file mode 100644
index 000000000..176c2b101
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlConfig-not-required.html.md
@@ -0,0 +1,13 @@
+
+
+- `prlctl` ([][]string) - Custom prlctl commands to execute
+ in order to further customize the virtual machine being created. The value
+ of this is an array of commands to execute. The commands are executed in the
+ order defined in the template. For each command, the command is defined
+ itself as an array of strings, where each string represents a single
+ argument on the command-line to prlctl (but excluding prlctl itself).
+ Each arg is treated as a configuration
+ template, where the Name
+ variable is replaced with the VM name. More details on how to use prlctl
+ are below.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_PrlctlPostConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_PrlctlPostConfig-not-required.html.md
new file mode 100644
index 000000000..2a7420e3b
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlPostConfig-not-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `prlctl_post` ([][]string) - Identical to prlctl, except
+ that it is run after the virtual machine is shutdown, and before the virtual
+ machine is exported.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_PrlctlVersionConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_PrlctlVersionConfig-not-required.html.md
new file mode 100644
index 000000000..9425ede14
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlVersionConfig-not-required.html.md
@@ -0,0 +1,8 @@
+
+
+- `prlctl_version_file` (string) - The path within the virtual machine to
+ upload a file that contains the prlctl version that was used to create
+ the machine. This information can be useful for provisioning. By default
+ this is ".prlctl_version", which will generally upload it into the
+ home directory.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_ShutdownConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_ShutdownConfig-not-required.html.md
new file mode 100644
index 000000000..73f702371
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_ShutdownConfig-not-required.html.md
@@ -0,0 +1,11 @@
+
+
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine.
+
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ shutdown_command for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_ToolsConfig-not-required.html.md b/website/source/partials/builder/parallels/common/_ToolsConfig-not-required.html.md
new file mode 100644
index 000000000..f51d66b1d
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_ToolsConfig-not-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `parallels_tools_guest_path` (string) - The path in the virtual machine to
+ upload Parallels Tools. This only takes effect if parallels_tools_mode
+ is "upload". This is a configuration
+ template that has a single
+ valid variable: Flavor, which will be the value of
+ parallels_tools_flavor. By default this is "prl-tools-{{.Flavor}}.iso"
+ which should upload into the login directory of the user.
+
+- `parallels_tools_mode` (string) - The method by which Parallels Tools are
+ made available to the guest for installation. Valid options are "upload",
+ "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
+ be attached as a CD device to the virtual machine. If the mode is "upload"
+ the Parallels Tools ISO will be uploaded to the path specified by
+ parallels_tools_guest_path. The default value is "upload".
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/common/_ToolsConfig-required.html.md b/website/source/partials/builder/parallels/common/_ToolsConfig-required.html.md
new file mode 100644
index 000000000..1e65061c2
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_ToolsConfig-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
+ install into the VM. Valid values are "win", "lin", "mac", "os2"
+ and "other". This can be omitted only if parallels_tools_mode
+ is "disable".
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/iso/_Config-not-required.html.md b/website/source/partials/builder/parallels/iso/_Config-not-required.html.md
new file mode 100644
index 000000000..ae85e5c05
--- /dev/null
+++ b/website/source/partials/builder/parallels/iso/_Config-not-required.html.md
@@ -0,0 +1,40 @@
+
+
+- `disk_size` (uint) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40000 (about 40 GB).
+
+- `disk_type` (string) - The type for image file based virtual disk drives,
+ defaults to expand. Valid options are expand (expanding disk) that the
+ image file is small initially and grows in size as you add data to it, and
+ plain (plain disk) that the image file has a fixed size from the moment it
+ is created (i.e the space is allocated for the full drive). Plain disks
+ perform faster than expanding disks. skip_compaction will be set to true
+ automatically for plain disks.
+
+- `guest_os_type` (string) - The guest OS type being installed. By default
+ this is "other", but you can get dramatic performance improvements by
+ setting this to the proper value. To view all available values for this run
+ prlctl create x --distribution list. Setting the correct value hints to
+ Parallels Desktop how to optimize the virtual hardware to work best with
+ that operating system.
+
+- `hard_drive_interface` (string) - The type of controller that the hard
+ drives are attached to, defaults to "sata". Valid options are "sata", "ide",
+ and "scsi".
+
+- `host_interfaces` ([]string) - A list of which interfaces on the
+  host should be searched for an IP address. The first IP address found on one
+  of these will be used as {{ .HTTPIP }} in the boot_command. Defaults to
+  ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
+  "ppp0", "ppp1", "ppp2"].
+
+- `skip_compaction` (bool) - Virtual disk image is compacted at the end of
+ the build process using prl_disk_tool utility (except for the case that
+ disk_type is set to plain). In certain rare cases, this might corrupt
+ the resulting disk image. If you find this to be the case, you can disable
+ compaction using this configuration value.
+
+- `vm_name` (string) - This is the name of the PVM directory for the new
+ virtual machine, without the file extension. By default this is
+ "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/pvm/_Config-not-required.html.md b/website/source/partials/builder/parallels/pvm/_Config-not-required.html.md
new file mode 100644
index 000000000..ff5d3e4d1
--- /dev/null
+++ b/website/source/partials/builder/parallels/pvm/_Config-not-required.html.md
@@ -0,0 +1,16 @@
+
+
+- `skip_compaction` (bool) - Virtual disk image is compacted at the end of
+ the build process using prl_disk_tool utility (except for the case that
+ disk_type is set to plain). In certain rare cases, this might corrupt
+ the resulting disk image. If you find this to be the case, you can disable
+ compaction using this configuration value.
+
+- `vm_name` (string) - This is the name of the PVM directory for the new
+ virtual machine, without the file extension. By default this is
+ "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+
+- `reassign_mac` (bool) - If this is "false" the MAC address of the first
+  NIC will be reused when imported; otherwise a new MAC address will be
+  generated by Parallels. Defaults to "false".
+
\ No newline at end of file
diff --git a/website/source/partials/builder/parallels/pvm/_Config-required.html.md b/website/source/partials/builder/parallels/pvm/_Config-required.html.md
new file mode 100644
index 000000000..58e68b704
--- /dev/null
+++ b/website/source/partials/builder/parallels/pvm/_Config-required.html.md
@@ -0,0 +1,5 @@
+
+
+- `source_path` (string) - The path to a PVM directory that acts as the source
+ of this build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/qemu/_Config-not-required.html.md b/website/source/partials/builder/qemu/_Config-not-required.html.md
new file mode 100644
index 000000000..f70a9979e
--- /dev/null
+++ b/website/source/partials/builder/qemu/_Config-not-required.html.md
@@ -0,0 +1,140 @@
+
+
+- `iso_skip_cache` (bool) - Use iso from provided url. Qemu must support
+ curl block device. This defaults to false.
+
+- `accelerator` (string) - The accelerator type to use when running the VM.
+ This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
+ software must have already been installed on your build machine to use the
+ accelerator you specified. When no accelerator is specified, Packer will try
+ to use kvm if it is available but will default to tcg otherwise.
+
+- `cpus` (int) - The number of cpus to use when building the VM.
+ The default is 1 CPU.
+
+- `disk_interface` (string) - The interface to use for the disk. Allowed
+ values include any of ide, scsi, virtio or virtio-scsi*. Note
+ also that any boot commands or kickstart type scripts must have proper
+ adjustments for resulting device names. The Qemu builder uses virtio by
+ default.
+
+- `disk_size` (uint) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40960 (40 GB).
+
+- `disk_cache` (string) - The cache mode to use for disk. Allowed values
+ include any of writethrough, writeback, none, unsafe
+ or directsync. By default, this is set to writeback.
+
+- `disk_discard` (string) - The discard mode to use for disk. Allowed values
+ include any of unmap or ignore. By default, this is set to ignore.
+
+- `disk_detect_zeroes` (string) - The detect-zeroes mode to use for disk.
+ Allowed values include any of unmap, on or off. Defaults to off.
+ When the value is "off" we don't set the flag in the qemu command, so that
+ Packer still works with old versions of QEMU that don't have this option.
+
+- `skip_compaction` (bool) - Packer compacts the QCOW2 image using
+ qemu-img convert. Set this option to true to disable compacting.
+ Defaults to false.
+
+- `disk_compression` (bool) - Apply compression to the QCOW2 disk file
+ using qemu-img convert. Defaults to false.
+
+- `format` (string) - Either qcow2 or raw, this specifies the output
+ format of the virtual machine image. This defaults to qcow2.
+
+- `headless` (bool) - Packer defaults to building QEMU virtual machines by
+ launching a GUI that shows the console of the machine being built. When this
+ value is set to true, the machine will start without a console.
+
+- `disk_image` (bool) - Packer defaults to building from an ISO file, this
+ parameter controls whether the ISO URL supplied is actually a bootable
+ QEMU image. When this value is set to true, the machine will either clone
+ the source or use it as a backing file (if use_backing_file is true);
+ then, it will resize the image according to disk_size and boot it.
+
+- `use_backing_file` (bool) - Only applicable when disk_image is true
+ and format is qcow2, set this option to true to create a new QCOW2
+ file that uses the file located at iso_url as a backing file. The new file
+ will only contain blocks that have changed compared to the backing file, so
+ enabling this option can significantly reduce disk usage.
+
+- `machine_type` (string) - The type of machine emulation to use. Run your
+ qemu binary with the flags -machine help to list available types for
+ your system. This defaults to pc.
+
+- `memory` (int) - The amount of memory to use when building the VM
+ in megabytes. This defaults to 512 megabytes.
+
+- `net_device` (string) - The driver to use for the network interface. Allowed
+ values ne2k_pci, i82551, i82557b, i82559er, rtl8139, e1000,
+ pcnet, virtio, virtio-net, virtio-net-pci, usb-net, i82559a,
+ i82559b, i82559c, i82550, i82562, i82557a, i82557c, i82801,
+ vmxnet3, i82558a or i82558b. The Qemu builder uses virtio-net by
+ default.
+
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when packer
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ name of the build.
+
+- `qemuargs` ([][]string) - Allows complete control over the
+ qemu command line (though not, at this time, qemu-img). Each array of
+ strings makes up a command line switch that overrides matching default
+ switch/value pairs. Any value specified as an empty string is ignored. All
+ values after the switch are concatenated with no separator.
+
+- `qemu_binary` (string) - The name of the Qemu binary to look for. This
+ defaults to qemu-system-x86_64, but may need to be changed for
+ some platforms. For example qemu-kvm, or qemu-system-i386 may be a
+ better choice for some systems.
+
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine unless a
+ shutdown command takes place inside script so this may safely be omitted. It
+ is important to add a shutdown_command. By default Packer halts the virtual
+ machine and the file system may not be sync'd. Thus, changes made in a
+ provisioner might not be saved. If one or more scripts require a reboot it is
+ suggested to leave this blank since reboots may fail and specify the final
+ shutdown command in your last script.
+
+- `ssh_host_port_min` (int) - The minimum and
+ maximum port to use for the SSH port on the host machine which is forwarded
+ to the SSH port on the guest machine. Because Packer often runs in parallel,
+ Packer will choose a randomly available port in this range to use as the
+ host port. By default this is 2222 to 4444.
+
+- `ssh_host_port_max` (int) - SSH Host Port Max
+- `use_default_display` (bool) - If true, do not pass a -display option
+ to qemu, allowing it to choose the default. This may be needed when running
+ under macOS, and getting errors about sdl not being available.
+
+- `vnc_bind_address` (string) - The IP address that should be
+  bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+  wish to bind to all interfaces use 0.0.0.0.
+
+- `vnc_port_min` (int) - The minimum and maximum port
+ to use for VNC access to the virtual machine. The builder uses VNC to type
+ the initial boot_command. Because Packer generally runs in parallel,
+ Packer uses a randomly chosen port in this range that appears available. By
+ default this is 5900 to 6000. The minimum and maximum ports are inclusive.
+
+- `vnc_port_max` (int) - VNC Port Max
+- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for
+ the new virtual machine. By default this is packer-BUILDNAME, where
+ "BUILDNAME" is the name of the build. Currently, no file extension will be
+ used unless it is specified in this option.
+
+- `ssh_wait_timeout` (time.Duration) - These are deprecated, but we keep them around for BC
+ TODO(@mitchellh): remove
+
+- `run_once` (bool) - TODO(mitchellh): deprecate
+
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ shutdown_command for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ 5m or five minutes.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/scaleway/_Config-not-required.html.md b/website/source/partials/builder/scaleway/_Config-not-required.html.md
new file mode 100644
index 000000000..251639fdd
--- /dev/null
+++ b/website/source/partials/builder/scaleway/_Config-not-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `snapshot_name` (string) - The name of the resulting snapshot that will
+ appear in your account. Default packer-TIMESTAMP
+
+- `image_name` (string) - The name of the resulting image that will appear in
+ your account. Default packer-TIMESTAMP
+
+- `server_name` (string) - The name assigned to the server. Default
+ packer-UUID
+
+- `bootscript` (string) - The id of an existing bootscript to use when
+ booting the server.
+
+- `boottype` (string) - The type of boot, can be either local or
+ bootscript, Default bootscript
+
\ No newline at end of file
diff --git a/website/source/partials/builder/scaleway/_Config-required.html.md b/website/source/partials/builder/scaleway/_Config-required.html.md
new file mode 100644
index 000000000..0650c8529
--- /dev/null
+++ b/website/source/partials/builder/scaleway/_Config-required.html.md
@@ -0,0 +1,29 @@
+
+
+- `api_token` (string) - The token to use to authenticate with your account.
+ It can also be specified via environment variable SCALEWAY_API_TOKEN. You
+ can see and generate tokens in the "Credentials"
+ section of the control panel.
+
+- `organization_id` (string) - The organization id to use to identify your
+ organization. It can also be specified via environment variable
+ SCALEWAY_ORGANIZATION. Your organization id is available in the
+ "Account" section of the
+ control panel.
+ Previously named: api_access_key with environment variable: SCALEWAY_API_ACCESS_KEY
+
+- `region` (string) - The name of the region to launch the server in (par1
+ or ams1). Consequently, this is the region where the snapshot will be
+ available.
+
+- `image` (string) - The UUID of the base image to use. This is the image
+  that will be used to launch a new server and provision it. See
+  the images list
+  to get the complete list of accepted image UUIDs.
+
+- `commercial_type` (string) - The name of the server commercial type:
+ ARM64-128GB, ARM64-16GB, ARM64-2GB, ARM64-32GB, ARM64-4GB,
+ ARM64-64GB, ARM64-8GB, C1, C2L, C2M, C2S, START1-L,
+ START1-M, START1-S, START1-XS, X64-120GB, X64-15GB, X64-30GB,
+ X64-60GB
+
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-not-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-not-required.html.md
new file mode 100644
index 000000000..e6a643f42
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-not-required.html.md
@@ -0,0 +1,4 @@
+
+
+- `skip_region_validation` (bool) - Do not check region and zone when validating.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-required.html.md
new file mode 100644
index 000000000..4f0164c62
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudAccessConfig-required.html.md
@@ -0,0 +1,16 @@
+
+
+- `secret_id` (string) - Tencentcloud secret id. You should set it directly,
+ or set the TENCENTCLOUD_ACCESS_KEY environment variable.
+
+- `secret_key` (string) - Tencentcloud secret key. You should set it directly,
+ or set the TENCENTCLOUD_SECRET_KEY environment variable.
+
+- `region` (string) - The region where your cvm will be launched. You should
+  reference Region and Zone
+  for valid values.
+
+- `zone` (string) - The zone where your cvm will be launched. You should
+  reference Region and Zone
+  for valid values.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-not-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-not-required.html.md
new file mode 100644
index 000000000..f04670148
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-not-required.html.md
@@ -0,0 +1,21 @@
+
+
+- `image_description` (string) - Image description.
+
+- `reboot` (bool) - Whether to shut down the cvm to create the image.
+  Default value is false.
+
+- `force_poweroff` (bool) - Whether to force power off the cvm when creating
+  the image. Default value is false.
+
+- `sysprep` (bool) - Whether to enable Sysprep when creating a Windows image.
+
+- `image_force_delete` (bool) - Image Force Delete
+- `image_copy_regions` ([]string) - Regions that the image will be copied to
+  after it is created.
+
+- `image_share_accounts` ([]string) - Accounts that the image will be shared
+  with after it is created.
+
+- `skip_region_validation` (bool) - Do not check region and zone when validating.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-required.html.md
new file mode 100644
index 000000000..5020f004c
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudImageConfig-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `image_name` (string) - The name you want to give your customized image;
+  it should be composed of no more than 20 characters, using letters, numbers
+  or the minus sign.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-not-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-not-required.html.md
new file mode 100644
index 000000000..d07449f6c
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-not-required.html.md
@@ -0,0 +1,44 @@
+
+
+- `associate_public_ip_address` (bool) - Whether allocate public ip to your cvm.
+ Default value is false.
+
+- `instance_name` (string) - Instance name.
+
+- `disk_type` (string) - Root disk type your cvm will be launched by. You
+  could reference Disk Type
+  for valid values.
+
+- `disk_size` (int64) - Root disk size your cvm will be launched by. Values range (in GB):
+
+- `vpc_id` (string) - Specify vpc your cvm will be launched by.
+
+- `vpc_name` (string) - Specify the vpc name you will create. If vpc_id is
+  not set, packer will create a vpc for you named this parameter.
+
+- `vpc_ip` (string) - Vpc Ip
+- `subnet_id` (string) - Specify subnet your cvm will be launched by.
+
+- `subnet_name` (string) - Specify the subnet name you will create. If
+  subnet_id is not set, packer will create a subnet for you named this parameter.
+
+- `cidr_block` (string) - Specify the CIDR block of the vpc you will create if vpc_id not set
+
+- `subnect_cidr_block` (string) - Specify the CIDR block of the subnet you
+  will create if subnet_id not set
+
+- `internet_charge_type` (string) - Internet Charge Type
+- `internet_max_bandwidth_out` (int64) - Max bandwidth out your cvm will be
+  launched by (in MB). Values can be set between 1 and 100.
+
+- `security_group_id` (string) - Specify security group your cvm will be launched by.
+
+- `security_group_name` (string) - Specify the security group name you will create if security_group_id not set.
+
+- `user_data` (string) - userdata.
+
+- `user_data_file` (string) - userdata file.
+
+- `host_name` (string) - host name.
+
+- `ssh_private_ip` (bool) - SSH Private Ip
\ No newline at end of file
diff --git a/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-required.html.md b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-required.html.md
new file mode 100644
index 000000000..99abbbaef
--- /dev/null
+++ b/website/source/partials/builder/tencentcloud/cvm/_TencentCloudRunConfig-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `source_image_id` (string) - The base image id of the image you want to
+  create your customized image from.
+
+- `instance_type` (string) - The instance type your cvm will be launched by.
+  You should reference Instance Type
+  for valid values.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_AccessConfig-not-required.html.md b/website/source/partials/builder/triton/_AccessConfig-not-required.html.md
new file mode 100644
index 000000000..dd99fbc01
--- /dev/null
+++ b/website/source/partials/builder/triton/_AccessConfig-not-required.html.md
@@ -0,0 +1,20 @@
+
+
+- `triton_url` (string) - The URL of the Triton cloud API to use. If omitted
+ it will default to the us-sw-1 region of the Joyent Public cloud. If you
+ are using your own private Triton installation you will have to supply the
+ URL of the cloud API of your own Triton installation.
+
+- `triton_user` (string) - The username of a user who has access to your
+ Triton account.
+
+- `triton_key_material` (string) - Path to the file in which the private key
+ of triton_key_id is stored. For example /home/soandso/.ssh/id_rsa. If
+ this is not specified, the SSH agent is used to sign requests with the
+ triton_key_id specified.
+
+- `insecure_skip_tls_verify` (bool) - This allows skipping TLS verification
+  of the Triton endpoint. It is useful when connecting to a temporary Triton
+  installation such as Cloud-On-A-Laptop which does not generally use a
+  certificate signed by a trusted root CA. The default is false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_AccessConfig-required.html.md b/website/source/partials/builder/triton/_AccessConfig-required.html.md
new file mode 100644
index 000000000..403b64ed9
--- /dev/null
+++ b/website/source/partials/builder/triton/_AccessConfig-required.html.md
@@ -0,0 +1,10 @@
+
+
+- `triton_account` (string) - The username of the Triton account to use when
+ using the Triton Cloud API.
+
+- `triton_key_id` (string) - The fingerprint of the public key of the SSH key
+ pair to use for authentication with the Triton Cloud API. If
+ triton_key_material is not set, it is assumed that the SSH agent has the
+ private key corresponding to this key ID loaded.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_MachineImageFilter-not-required.html.md b/website/source/partials/builder/triton/_MachineImageFilter-not-required.html.md
new file mode 100644
index 000000000..e60e43734
--- /dev/null
+++ b/website/source/partials/builder/triton/_MachineImageFilter-not-required.html.md
@@ -0,0 +1,3 @@
+
+
+- `most_recent` (bool) - Most Recent
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_SourceMachineConfig-not-required.html.md b/website/source/partials/builder/triton/_SourceMachineConfig-not-required.html.md
new file mode 100644
index 000000000..a3e6d821e
--- /dev/null
+++ b/website/source/partials/builder/triton/_SourceMachineConfig-not-required.html.md
@@ -0,0 +1,39 @@
+
+
+- `source_machine_name` (string) - Name of the VM used for building the
+  image. Does not affect (and does not have to be the same as) the name for a
+  VM instance running this image. Maximum 512 characters but should in
+  practice be much shorter (think between 5 and 20 characters). For example
+  mysql-64-server-image-builder. When omitted defaults to
+  packer-builder-[image_name].
+
+- `source_machine_networks` ([]string) - The UUID's of Triton
+ networks added to the source machine used for creating the image. For
+ example if any of the provisioners which are run need Internet access you
+ will need to add the UUID's of the appropriate networks here. If this is
+ not specified, instances will be placed into the default Triton public and
+ internal networks.
+
+- `source_machine_metadata` (map[string]string) - Triton metadata
+ applied to the VM used to create the image. Metadata can be used to pass
+ configuration information to the VM without the need for networking. See
+ Using the metadata
+ API in the
+ Joyent documentation for more information. This can for example be used to
+ set the user-script metadata key to have Triton start a user supplied
+ script after the VM has booted.
+
+- `source_machine_tags` (map[string]string) - Tags applied to the
+ VM used to create the image.
+
+- `source_machine_firewall_enabled` (bool) - Whether or not the firewall
+ of the VM used to create an image of is enabled. The Triton firewall only
+ filters inbound traffic to the VM. All outbound traffic is always allowed.
+ Currently this builder does not provide an interface to add specific
+ firewall rules. Unless you have a global rule defined in Triton which
+ allows SSH traffic enabling the firewall will interfere with the SSH
+ provisioner. The default is false.
+
+- `source_machine_image_filter` (MachineImageFilter) - Filters used to populate the
+ source_machine_image field. Example:
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_SourceMachineConfig-required.html.md b/website/source/partials/builder/triton/_SourceMachineConfig-required.html.md
new file mode 100644
index 000000000..2855845c7
--- /dev/null
+++ b/website/source/partials/builder/triton/_SourceMachineConfig-required.html.md
@@ -0,0 +1,19 @@
+
+
+- `source_machine_package` (string) - The Triton package to use while
+  building the image. Does not affect (and does not have to be the same as)
+  the package which will be used for a VM instance running this image. On the
+  Joyent public cloud this could for example be g3-standard-0.5-smartos.
+
+- `source_machine_image` (string) - The UUID of the image to base the new
+  image on. Triton supports multiple types of images, called 'brands' in
+  Triton / Joyent lingo, for containers and VMs. See the chapter Containers
+  and virtual machines in
+  the Joyent Triton documentation for detailed information. The following
+  brands are currently supported by this builder: joyent and kvm. The
+  choice of base image automatically decides the brand. On the Joyent public
+  cloud a valid source_machine_image could for example be
+  70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit
+  SmartOS base image (a 'joyent' brand image). source_machine_image_filter
+  can be used to populate this UUID.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_TargetImageConfig-not-required.html.md b/website/source/partials/builder/triton/_TargetImageConfig-not-required.html.md
new file mode 100644
index 000000000..c66a0fef6
--- /dev/null
+++ b/website/source/partials/builder/triton/_TargetImageConfig-not-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `image_description` (string) - Description of the image. Maximum 512
+ characters.
+
+- `image_homepage` (string) - URL of the homepage where users can find
+ information about the image. Maximum 128 characters.
+
+- `image_eula_url` (string) - URL of the End User License Agreement (EULA)
+ for the image. Maximum 128 characters.
+
+- `image_acls` ([]string) - The UUID's of the users which will have
+ access to this image. When omitted only the owner (the Triton user whose
+ credentials are used) will have access to the image.
+
+- `image_tags` (map[string]string) - Tag applied to the image.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/triton/_TargetImageConfig-required.html.md b/website/source/partials/builder/triton/_TargetImageConfig-required.html.md
new file mode 100644
index 000000000..def119892
--- /dev/null
+++ b/website/source/partials/builder/triton/_TargetImageConfig-required.html.md
@@ -0,0 +1,12 @@
+
+
+- `image_name` (string) - The name the finished image in Triton will be
+ assigned. Maximum 512 characters but should in practice be much shorter
+ (think between 5 and 20 characters). For example postgresql-95-server for
+ an image used as a PostgreSQL 9.5 server.
+
+- `image_version` (string) - The version string for this image. Maximum 128
+ characters. Any string will do but a format of Major.Minor.Patch is
+ strongly advised by Joyent. See Semantic Versioning
+ for more information on the Major.Minor.Patch versioning format.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vagrant/_Config-not-required.html.md b/website/source/partials/builder/vagrant/_Config-not-required.html.md
new file mode 100644
index 000000000..7611edca6
--- /dev/null
+++ b/website/source/partials/builder/vagrant/_Config-not-required.html.md
@@ -0,0 +1,69 @@
+
+
+- `output_dir` (string) - The directory to create that will contain your output box. We always
+ create this directory and run from inside of it to prevent Vagrant init
+ collisions. If unset, it will be set to packer- plus your buildname.
+
+- `checksum` (string) - The checksum for the .box file. The type of the checksum is specified
+ with checksum_type, documented below.
+
+- `checksum_type` (string) - The type of the checksum specified in checksum. Valid values are none,
+ md5, sha1, sha256, or sha512. Although the checksum will not be verified
+ when checksum_type is set to "none", this is not recommended since OVA
+ files can be very large and corruption does happen from time to time.
+
+- `box_name` (string) - if your source_box is a boxfile that we need to add to Vagrant, this is
+ the name to give it. If left blank, will default to "packer_" plus your
+ buildname.
+
+- `provider` (string) - The vagrant provider.
+ This parameter is required when source_path has more than one provider,
+ or when using vagrant-cloud post-processor. Defaults to unset.
+
+- `communicator` (string) - Communicator
+- `vagrantfile_template` (string) - What vagrantfile to use
+
+- `teardown_method` (string) - Whether to halt, suspend, or destroy the box when the build has
+ completed. Defaults to "halt"
+
+- `box_version` (string) - What box version to use when initializing Vagrant.
+
+- `template` (string) - a path to a golang template for a vagrantfile. Our default template can
+ be found here. So far the only template variables available to you are
+ {{ .BoxName }} and {{ .SyncedFolder }}, which correspond to the Packer
+ options box_name and synced_folder.
+
+- `synced_folder` (string) - Synced Folder
+- `skip_add` (bool) - Don't call "vagrant add" to add the box to your local environment; this
+ is necessary if you want to launch a box that is already added to your
+ vagrant environment.
+
+- `add_cacert` (string) - Equivalent to setting the
+ --cacert
+ option in vagrant add; defaults to unset.
+
+- `add_capath` (string) - Equivalent to setting the
+ --capath option
+ in vagrant add; defaults to unset.
+
+- `add_cert` (string) - Equivalent to setting the
+ --cert option in
+ vagrant add; defaults to unset.
+
+- `add_clean` (bool) - Equivalent to setting the
+ --clean flag in
+ vagrant add; defaults to unset.
+
+- `add_force` (bool) - Equivalent to setting the
+ --force flag in
+ vagrant add; defaults to unset.
+
+- `add_insecure` (bool) - Equivalent to setting the
+ --insecure flag in
+ vagrant add; defaults to unset.
+
+- `skip_package` (bool) - if true, Packer will not call vagrant package to
+ package your base box into its own standalone .box file.
+
+- `output_vagrantfile` (string) - Output Vagrantfile
+- `package_include` ([]string) - Package Include
\ No newline at end of file
diff --git a/website/source/partials/builder/vagrant/_Config-required.html.md b/website/source/partials/builder/vagrant/_Config-required.html.md
new file mode 100644
index 000000000..a811f3ad0
--- /dev/null
+++ b/website/source/partials/builder/vagrant/_Config-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `source_path` (string) - URL of the vagrant box to use, or the name of the vagrant box.
+ hashicorp/precise64, ./mylocalbox.box and https://example.com/my-box.box
+ are all valid source boxes. If your source is a .box file, whether
+ locally or from a URL like the latter example above, you will also need
+ to provide a box_name. This option is required, unless you set
+ global_id. You may only set one or the other, not both.
+
+- `global_id` (string) - the global id of a Vagrant box already added to Vagrant on your system.
+ You can find the global id of your Vagrant boxes using the command
+ vagrant global-status; your global_id will be a 7-digit number and
+ letter combination that you'll find in the leftmost column of the
+ global-status output. If you choose to use global_id instead of
+ source_box, Packer will skip the Vagrant initialize and add steps, and
+ simply launch the box directly using the global id.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_ExportConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_ExportConfig-not-required.html.md
new file mode 100644
index 000000000..9fe3971b2
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_ExportConfig-not-required.html.md
@@ -0,0 +1,5 @@
+
+
+- `format` (string) - Either ovf or ova, this specifies the output format
+ of the exported virtual machine. This defaults to ovf.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_ExportOpts-not-required.html.md b/website/source/partials/builder/virtualbox/common/_ExportOpts-not-required.html.md
new file mode 100644
index 000000000..dec0c855a
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_ExportOpts-not-required.html.md
@@ -0,0 +1,8 @@
+
+
+- `export_opts` ([]string) - Additional options to pass to the
+ VBoxManage
+ export. This
+ can be useful for passing product information to include in the resulting
+ appliance file. Packer JSON configuration file example:
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_GuestAdditionsConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_GuestAdditionsConfig-not-required.html.md
new file mode 100644
index 000000000..8ee1d7a57
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_GuestAdditionsConfig-not-required.html.md
@@ -0,0 +1,11 @@
+
+
+- `communicator` (string) - Communicator
+- `guest_additions_mode` (string) - The method by which guest additions are
+ made available to the guest for installation. Valid options are upload,
+ attach, or disable. If the mode is attach the guest additions ISO will
+ be attached as a CD device to the virtual machine. If the mode is upload
+ the guest additions ISO will be uploaded to the path specified by
+ guest_additions_path. The default value is upload. If disable is used,
+ guest additions won't be downloaded, either.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_HWConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_HWConfig-not-required.html.md
new file mode 100644
index 000000000..058663be1
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_HWConfig-not-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `cpus` (int) - The number of cpus to use for building the VM.
+ Defaults to 1.
+
+- `memory` (int) - The amount of memory to use for building the VM
+ in megabytes. Defaults to 512 megabytes.
+
+- `sound` (string) - Defaults to none. The type of audio device to use for
+ sound when building the VM. Some of the options that are available are
+ dsound, oss, alsa, pulse, coreaudio, null.
+
+- `usb` (bool) - Specifies whether or not to enable the USB bus when
+ building the VM. Defaults to false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_OutputConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_OutputConfig-not-required.html.md
new file mode 100644
index 000000000..5794e6900
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_OutputConfig-not-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when packer
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_RunConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_RunConfig-not-required.html.md
new file mode 100644
index 000000000..57715b17d
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_RunConfig-not-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `headless` (bool) - Packer defaults to building VirtualBox virtual
+ machines by launching a GUI that shows the console of the machine
+ being built. When this value is set to true, the machine will start
+ without a console.
+
+- `vrdp_bind_address` (string) - The IP address that should be
+ binded to for VRDP. By default packer will use 127.0.0.1 for this. If you
+ wish to bind to all interfaces use 0.0.0.0.
+
+- `vrdp_port_min` (int) - The minimum and maximum port
+ to use for VRDP access to the virtual machine. Packer uses a randomly chosen
+ port in this range that appears available. By default this is 5900 to
+ 6000. The minimum and maximum ports are inclusive.
+
+- `vrdp_port_max` (int) - VRDP Port Max
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_SSHConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_SSHConfig-not-required.html.md
new file mode 100644
index 000000000..0db417ad5
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_SSHConfig-not-required.html.md
@@ -0,0 +1,16 @@
+
+
+- `ssh_host_port_min` (int) - The minimum and
+ maximum port to use for the SSH port on the host machine which is forwarded
+ to the SSH port on the guest machine. Because Packer often runs in parallel,
+ Packer will choose a randomly available port in this range to use as the
+ host port. By default this is 2222 to 4444.
+
+- `ssh_host_port_max` (int) - SSH Host Port Max
+- `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer
+ does not setup forwarded port mapping for SSH requests and uses ssh_port
+ on the host to communicate to the virtual machine.
+
+- `ssh_wait_timeout` (time.Duration) - These are deprecated, but we keep them around for BC
+ TODO(@mitchellh): remove
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_ShutdownConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_ShutdownConfig-not-required.html.md
new file mode 100644
index 000000000..aba3347fe
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_ShutdownConfig-not-required.html.md
@@ -0,0 +1,20 @@
+
+
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine unless a
+ shutdown command takes place inside script so this may safely be omitted. If
+ one or more scripts require a reboot it is suggested to leave this blank
+ since reboots may fail and specify the final shutdown command in your
+ last script.
+
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ shutdown_command for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ 5m or five minutes.
+
+- `post_shutdown_delay` (string) - The amount of time to wait after shutting
+ down the virtual machine. If you get the error
+ Error removing floppy controller, you might need to set this to 5m
+ or so. By default, the delay is 0s or disabled.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_VBoxBundleConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_VBoxBundleConfig-not-required.html.md
new file mode 100644
index 000000000..8a759809f
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_VBoxBundleConfig-not-required.html.md
@@ -0,0 +1,7 @@
+
+
+- `bundle_iso` (bool) - Defaults to false. When enabled, Packer includes
+ any attached ISO disc devices into the final virtual machine. Useful for
+ some live distributions that require installation media to continue to be
+ attached after installation.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_VBoxManageConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_VBoxManageConfig-not-required.html.md
new file mode 100644
index 000000000..970f5281a
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_VBoxManageConfig-not-required.html.md
@@ -0,0 +1,13 @@
+
+
+- `vboxmanage` ([][]string) - Custom VBoxManage commands to
+ execute in order to further customize the virtual machine being created. The
+ value of this is an array of commands to execute. The commands are executed
+ in the order defined in the template. For each command, the command is
+ defined itself as an array of strings, where each string represents a single
+ argument on the command-line to VBoxManage (but excluding
+ VBoxManage itself). Each arg is treated as a configuration
+ template, where the Name
+ variable is replaced with the VM name. More details on how to use
+ VBoxManage are below.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_VBoxManagePostConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_VBoxManagePostConfig-not-required.html.md
new file mode 100644
index 000000000..83c75ab18
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_VBoxManagePostConfig-not-required.html.md
@@ -0,0 +1,6 @@
+
+
+- `vboxmanage_post` ([][]string) - Identical to vboxmanage,
+ except that it is run after the virtual machine is shutdown, and before the
+ virtual machine is exported.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/common/_VBoxVersionConfig-not-required.html.md b/website/source/partials/builder/virtualbox/common/_VBoxVersionConfig-not-required.html.md
new file mode 100644
index 000000000..e7f297bf3
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/common/_VBoxVersionConfig-not-required.html.md
@@ -0,0 +1,10 @@
+
+
+- `communicator` (string) - Communicator
+- `virtualbox_version_file` (*string) - The path within the virtual machine to
+ upload a file that contains the VirtualBox version that was used to create
+ the machine. This information can be useful for provisioning. By default
+ this is .vbox_version, which will generally be uploaded into the
+ home directory. Set to an empty string to skip uploading this file, which
+ can be useful when using the none communicator.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/iso/_Config-not-required.html.md b/website/source/partials/builder/virtualbox/iso/_Config-not-required.html.md
new file mode 100644
index 000000000..3cf87b358
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/iso/_Config-not-required.html.md
@@ -0,0 +1,77 @@
+
+
+- `disk_size` (uint) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40000 (about 40 GB).
+
+- `guest_additions_mode` (string) - The method by which guest additions are
+ made available to the guest for installation. Valid options are upload,
+ attach, or disable. If the mode is attach the guest additions ISO will
+ be attached as a CD device to the virtual machine. If the mode is upload
+ the guest additions ISO will be uploaded to the path specified by
+ guest_additions_path. The default value is upload. If disable is used,
+ guest additions won't be downloaded, either.
+
+- `guest_additions_path` (string) - The path on the guest virtual machine
+ where the VirtualBox guest additions ISO will be uploaded. By default this
+ is VBoxGuestAdditions.iso which should upload into the login directory of
+ the user. This is a configuration
+ template where the Version
+ variable is replaced with the VirtualBox version.
+
+- `guest_additions_sha256` (string) - The SHA256 checksum of the guest
+ additions ISO that will be uploaded to the guest VM. By default the
+ checksums will be downloaded from the VirtualBox website, so this only needs
+ to be set if you want to be explicit about the checksum.
+
+- `guest_additions_url` (string) - The URL to the guest additions ISO
+ to upload. This can also be a file URL if the ISO is at a local path. By
+ default, the VirtualBox builder will attempt to find the guest additions ISO
+ on the local file system. If it is not available locally, the builder will
+ download the proper guest additions ISO from the internet.
+
+- `guest_additions_interface` (string) - The interface type to use to mount
+ guest additions when guest_additions_mode is set to attach. Will
+ default to the value set in iso_interface, if iso_interface is set.
+ Will default to "ide", if iso_interface is not set. Options are "ide" and
+ "sata".
+
+- `guest_os_type` (string) - The guest OS type being installed. By default
+ this is other, but you can get dramatic performance improvements by
+ setting this to the proper value. To view all available values for this run
+ VBoxManage list ostypes. Setting the correct value hints to VirtualBox how
+ to optimize the virtual hardware to work best with that operating system.
+
+- `hard_drive_discard` (bool) - When this value is set to true, a VDI
+ image will be shrunk in response to the trim command from the guest OS.
+ The size of the cleared area must be at least 1MB. Also set
+ hard_drive_nonrotational to true to enable TRIM support.
+
+- `hard_drive_interface` (string) - The type of controller that the primary
+ hard drive is attached to, defaults to ide. When set to sata, the drive
+ is attached to an AHCI SATA controller. When set to scsi, the drive is
+ attached to an LsiLogic SCSI controller.
+
+- `sata_port_count` (int) - The number of ports available on any SATA
+ controller created, defaults to 1. VirtualBox supports up to 30 ports on a
+ maximum of 1 SATA controller. Increasing this value can be useful if you
+ want to attach additional drives.
+
+- `hard_drive_nonrotational` (bool) - Forces some guests (i.e. Windows 7+)
+ to treat disks as SSDs and stops them from performing disk fragmentation.
+ Also set hard_drive_discard to true to enable TRIM support.
+
+- `iso_interface` (string) - The type of controller that the ISO is attached
+ to, defaults to ide. When set to sata, the drive is attached to an AHCI
+ SATA controller.
+
+- `keep_registered` (bool) - Set this to true if you would like to keep
+ the VM registered with virtualbox. Defaults to false.
+
+- `skip_export` (bool) - Defaults to false. When enabled, Packer will
+ not export the VM. Useful if the build output is not the resultant image,
+ but created inside the VM.
+
+- `vm_name` (string) - This is the name of the OVF file for the new virtual
+ machine, without the file extension. By default this is packer-BUILDNAME,
+ where "BUILDNAME" is the name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/ovf/_Config-not-required.html.md b/website/source/partials/builder/virtualbox/ovf/_Config-not-required.html.md
new file mode 100644
index 000000000..5f6a98c80
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/ovf/_Config-not-required.html.md
@@ -0,0 +1,63 @@
+
+
+- `checksum_type` (string) - The type of the checksum specified in checksum.
+ Valid values are none, md5, sha1, sha256, or sha512. Although the
+ checksum will not be verified when checksum_type is set to "none", this is
+ not recommended since OVA files can be very large and corruption does happen
+ from time to time.
+
+- `guest_additions_mode` (string) - The method by which guest additions are
+ made available to the guest for installation. Valid options are upload,
+ attach, or disable. If the mode is attach the guest additions ISO will
+ be attached as a CD device to the virtual machine. If the mode is upload
+ the guest additions ISO will be uploaded to the path specified by
+ guest_additions_path. The default value is upload. If disable is used,
+ guest additions won't be downloaded, either.
+
+- `guest_additions_path` (string) - The path on the guest virtual machine
+ where the VirtualBox guest additions ISO will be uploaded. By default this
+ is VBoxGuestAdditions.iso which should upload into the login directory of
+ the user. This is a configuration
+ template where the Version
+ variable is replaced with the VirtualBox version.
+
+- `guest_additions_interface` (string) - The interface type to use to mount
+ guest additions when guest_additions_mode is set to attach. Will
+ default to the value set in iso_interface, if iso_interface is set.
+ Will default to "ide", if iso_interface is not set. Options are "ide" and
+ "sata".
+
+- `guest_additions_sha256` (string) - The SHA256 checksum of the guest
+ additions ISO that will be uploaded to the guest VM. By default the
+ checksums will be downloaded from the VirtualBox website, so this only needs
+ to be set if you want to be explicit about the checksum.
+
+- `guest_additions_url` (string) - The URL to the guest additions ISO
+ to upload. This can also be a file URL if the ISO is at a local path. By
+ default, the VirtualBox builder will attempt to find the guest additions ISO
+ on the local file system. If it is not available locally, the builder will
+ download the proper guest additions ISO from the internet.
+
+- `import_flags` ([]string) - Additional flags to pass to
+ VBoxManage import. This can be used to add additional command-line flags
+ such as --eula-accept to accept a EULA in the OVF.
+
+- `import_opts` (string) - Additional options to pass to the
+ VBoxManage import. This can be useful for passing keepallmacs or
+ keepnatmacs options for existing ovf images.
+
+- `target_path` (string) - The path where the OVA should be saved
+ after download. By default, it will go in the packer cache, with a hash of
+ the original filename as its name.
+
+- `vm_name` (string) - This is the name of the OVF file for the new virtual
+ machine, without the file extension. By default this is packer-BUILDNAME,
+ where "BUILDNAME" is the name of the build.
+
+- `keep_registered` (bool) - Set this to true if you would like to keep
+ the VM registered with virtualbox. Defaults to false.
+
+- `skip_export` (bool) - Defaults to false. When enabled, Packer will
+ not export the VM. Useful if the build output is not the resultant image,
+ but created inside the VM.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/virtualbox/ovf/_Config-required.html.md b/website/source/partials/builder/virtualbox/ovf/_Config-required.html.md
new file mode 100644
index 000000000..90f3e8359
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/ovf/_Config-required.html.md
@@ -0,0 +1,12 @@
+
+
+- `checksum` (string) - The checksum for the source_path file. The
+ algorithm to use when computing the checksum can be optionally specified
+ with checksum_type. When checksum_type is not set packer will guess the
+ checksumming type based on checksum length. checksum can also be a
+ file or an URL, in which case checksum_type must be set to file; the
+ go-getter will download it and use the first hash found.
+
+- `source_path` (string) - The path to an OVF or OVA file that acts as the
+ source of this build. This currently must be a local file.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_DriverConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_DriverConfig-not-required.html.md
new file mode 100644
index 000000000..65e403370
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_DriverConfig-not-required.html.md
@@ -0,0 +1,38 @@
+
+
+- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is
+ /Applications/VMware Fusion.app but this setting allows you to
+ customize this.
+
+- `remote_type` (string) - The type of remote machine that will be used to
+ build this VM rather than a local desktop product. The only value accepted
+ for this currently is esx5. If this is not set, a desktop product will
+ be used. By default, this is not set.
+
+- `remote_datastore` (string) - The path to the datastore where the VM will be stored
+ on the ESXi machine.
+
+- `remote_cache_datastore` (string) - The path to the datastore where supporting files
+ will be stored during the build on the remote machine.
+
+- `remote_cache_directory` (string) - The path where the ISO and/or floppy files will
+ be stored during the build on the remote machine. The path is relative to
+ the remote_cache_datastore on the remote machine.
+
+- `remote_host` (string) - The host of the remote machine used for access.
+ This is only required if remote_type is enabled.
+
+- `remote_port` (int) - The SSH port of the remote machine
+
+- `remote_username` (string) - The SSH username used to access the remote machine.
+
+- `remote_password` (string) - The SSH password for access to the remote machine.
+
+- `remote_private_key_file` (string) - The SSH key for access to the remote machine.
+
+- `skip_validate_credentials` (bool) - When Packer is preparing to run a
+ remote esxi build, and export is not disabled, by default it runs a no-op
+ ovftool command to make sure that the remote_username and remote_password
+ given are valid. If you set this flag to true, Packer will skip this
+ validation. Default: false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_ExportConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_ExportConfig-not-required.html.md
new file mode 100644
index 000000000..80efaedec
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_ExportConfig-not-required.html.md
@@ -0,0 +1,41 @@
+
+
+- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output
+ format of the exported virtual machine. This defaults to "ovf".
+ Before using this option, you need to install ovftool. This option
+ currently only works when option remote_type is set to "esx5".
+ Since ovftool is only capable of password based authentication
+ remote_password must be set when exporting the VM.
+
+- `ovftool_options` ([]string) - Extra options to pass to ovftool
+ during export. Each item in the array is a new argument. The options
+ --noSSLVerify, --skipManifestCheck, and --targetType are reserved,
+ and should not be passed to this argument.
+ Currently, exporting the build VM (with ovftool) is only supported when
+ building on ESXi e.g. when remote_type is set to esx5. See the
+ Building on a Remote vSphere
+ Hypervisor
+ section below for more info.
+
+- `skip_export` (bool) - Defaults to false. When enabled, Packer will
+ not export the VM. Useful if the build output is not the resultant
+ image, but created inside the VM.
+ Currently, exporting the build VM is only supported when building on
+ ESXi e.g. when remote_type is set to esx5. See the Building on a
+ Remote vSphere
+ Hypervisor
+ section below for more info.
+
+- `keep_registered` (bool) - Set this to true if you would like to keep
+ the VM registered with the remote ESXi server. If you do not need to export
+ the vm, then also set skip_export: true in order to avoid an unnecessary
+ step of using ovftool to export the vm. Defaults to false.
+
+- `skip_compaction` (bool) - VMware-created disks are defragmented and
+ compacted at the end of the build process using vmware-vdiskmanager or
+ vmkfstools in ESXi. In certain rare cases, this might actually end up
+ making the resulting disks slightly larger. If you find this to be the case,
+ you can disable compaction using this configuration value. Defaults to
+ false. Default to true for ESXi when disk_type_id is not explicitly
+ defined and false otherwise.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_HWConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_HWConfig-not-required.html.md
new file mode 100644
index 000000000..b61851fd3
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_HWConfig-not-required.html.md
@@ -0,0 +1,38 @@
+
+
+- `cpus` (int) - The number of cpus to use when building the VM.
+
+- `memory` (int) - The amount of memory to use when building the VM
+ in megabytes.
+
+- `cores` (int) - The number of cores per socket to use when building the VM.
+ This corresponds to the cpuid.coresPerSocket option in the .vmx file.
+
+- `network` (string) - This is the network type that the virtual machine will
+ be created with. This can be one of the generic values that map to a device
+ such as hostonly, nat, or bridged. If the network is not one of these
+ values, then it is assumed to be a VMware network device. (VMnet0..x)
+
+- `network_adapter_type` (string) - This is the ethernet adapter type that the
+ virtual machine will be created with. By default the e1000 network adapter
+ type will be used by Packer. For more information, please consult the
+
+ Choosing a network adapter for your virtual machine for desktop VMware
+ clients. For ESXi, refer to the proper ESXi documentation.
+
+- `sound` (bool) - Specify whether to enable VMware's virtual soundcard
+ device when building the VM. Defaults to false.
+
+- `usb` (bool) - Enable VMware's USB bus when building the guest VM.
+ Defaults to false. To enable usage of the XHCI bus for USB 3 (5 Gbit/s),
+ one can use the vmx_data option to enable it by specifying true for
+ the usb_xhci.present property.
+
+- `serial` (string) - This specifies a serial port to add to the VM.
+ It has a format of Type:option1,option2,.... The field Type can be one
+ of the following values: FILE, DEVICE, PIPE, AUTO, or NONE.
+
+- `parallel` (string) - This specifies a parallel port to add to the VM. It
+ has the format of Type:option1,option2,.... Type can be one of the
+ following values: FILE, DEVICE, AUTO, or NONE.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_OutputConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_OutputConfig-not-required.html.md
new file mode 100644
index 000000000..99deb6e10
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_OutputConfig-not-required.html.md
@@ -0,0 +1,9 @@
+
+
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when packer
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
+ name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_RunConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_RunConfig-not-required.html.md
new file mode 100644
index 000000000..e3226dca6
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_RunConfig-not-required.html.md
@@ -0,0 +1,25 @@
+
+
+- `headless` (bool) - Packer defaults to building VMware virtual machines
+ by launching a GUI that shows the console of the machine being built. When
+ this value is set to true, the machine will start without a console. For
+ VMware machines, Packer will output VNC connection information in case you
+ need to connect to the console to debug the build process.
+
+- `vnc_bind_address` (string) - The IP address that should be
+  bound to for VNC. By default packer will use 127.0.0.1 for this. If you
+  wish to bind to all interfaces use 0.0.0.0.
+
+- `vnc_port_min` (int) - The minimum and maximum port
+ to use for VNC access to the virtual machine. The builder uses VNC to type
+ the initial boot_command. Because Packer generally runs in parallel,
+ Packer uses a randomly chosen port in this range that appears available. By
+ default this is 5900 to 6000. The minimum and maximum ports are
+ inclusive.
+
+- `vnc_port_max` (int) - VNC Port Max
+- `vnc_disable_password` (bool) - Don't auto-generate a VNC password that
+ is used to secure the VNC communication with the VM. This must be set to
+ true if building on ESXi 6.5 and 6.7 with VNC enabled. Defaults to
+ false.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_ShutdownConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_ShutdownConfig-not-required.html.md
new file mode 100644
index 000000000..ec9a2a69e
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_ShutdownConfig-not-required.html.md
@@ -0,0 +1,11 @@
+
+
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine.
+
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ shutdown_command for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ 5m or five minutes.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_ToolsConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_ToolsConfig-not-required.html.md
new file mode 100644
index 000000000..aa47a1074
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_ToolsConfig-not-required.html.md
@@ -0,0 +1,14 @@
+
+
+- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to
+ upload into the VM. Valid values are darwin, linux, and windows. By
+ default, this is empty, which means VMware tools won't be uploaded.
+
+- `tools_upload_path` (string) - The path in the VM to upload the
+ VMware tools. This only takes effect if tools_upload_flavor is non-empty.
+ This is a configuration
+ template that has a single
+ valid variable: Flavor, which will be the value of tools_upload_flavor.
+ By default the upload path is set to {{.Flavor}}.iso. This setting is not
+ used when remote_type is esx5.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/common/_VMXConfig-not-required.html.md b/website/source/partials/builder/vmware/common/_VMXConfig-not-required.html.md
new file mode 100644
index 000000000..5fd4a9142
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_VMXConfig-not-required.html.md
@@ -0,0 +1,23 @@
+
+
+- `vmx_data` (map[string]string) - Arbitrary key/values to enter
+ into the virtual machine VMX file. This is for advanced users who want to
+ set properties that aren't yet supported by the builder.
+
+- `vmx_data_post` (map[string]string) - Identical to vmx_data,
+ except that it is run after the virtual machine is shutdown, and before the
+ virtual machine is exported.
+
+- `vmx_remove_ethernet_interfaces` (bool) - Remove all ethernet interfaces
+ from the VMX file after building. This is for advanced users who understand
+ the ramifications, but is useful for building Vagrant boxes since Vagrant
+ will create ethernet interfaces when provisioning a box. Defaults to
+ false.
+
+- `display_name` (string) - The name that will appear in your vSphere client,
+ and will be used for the vmx basename. This will override the "displayname"
+ value in your vmx file. It will also override the "displayname" if you have
+ set it in the "vmx_data" Packer option. This option is useful if you are
+ chaining vmx builds and want to make sure that the display name of each step
+ in the chain is unique.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/iso/_Config-not-required.html.md b/website/source/partials/builder/vmware/iso/_Config-not-required.html.md
new file mode 100644
index 000000000..b6ed80e34
--- /dev/null
+++ b/website/source/partials/builder/vmware/iso/_Config-not-required.html.md
@@ -0,0 +1,65 @@
+
+
+- `disk_additional_size` ([]uint) - The size(s) of any additional
+ hard disks for the VM in megabytes. If this is not specified then the VM
+ will only contain a primary hard disk. The builder uses expandable, not
+ fixed-size virtual hard disks, so the actual file representing the disk will
+ not use the full size unless it is full.
+
+- `disk_adapter_type` (string) - The adapter type of the VMware virtual disk
+ to create. This option is for advanced usage, modify only if you know what
+ you're doing. Some of the options you can specify are ide, sata, nvme
+ or scsi (which uses the "lsilogic" scsi interface by default). If you
+ specify another option, Packer will assume that you're specifying a scsi
+ interface of that specified type. For more information, please consult the
+
+ Virtual Disk Manager User's Guide for desktop VMware clients.
+ For ESXi, refer to the proper ESXi documentation.
+
+- `vmdk_name` (string) - The filename of the virtual disk that'll be created,
+ without the extension. This defaults to packer.
+
+- `disk_size` (uint) - The size of the hard disk for the VM in megabytes.
+ The builder uses expandable, not fixed-size virtual hard disks, so the
+ actual file representing the disk will not use the full size unless it
+ is full. By default this is set to 40000 (about 40 GB).
+
+- `disk_type_id` (string) - The type of VMware virtual disk to create. This
+ option is for advanced usage.
+
+- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output
+ format of the exported virtual machine. This defaults to "ovf".
+ Before using this option, you need to install ovftool. This option
+ currently only works when option remote_type is set to "esx5".
+ Since ovftool is only capable of password based authentication
+ remote_password must be set when exporting the VM.
+
+- `cdrom_adapter_type` (string) - The adapter type (or bus) that will be used
+ by the cdrom device. This is chosen by default based on the disk adapter
+ type. VMware tends to lean towards ide for the cdrom device unless
+ sata is chosen for the disk adapter and so Packer attempts to mirror
+ this logic. This field can be specified as either ide, sata, or scsi.
+
+- `guest_os_type` (string) - The guest OS type being installed. This will be
+ set in the VMware VMX. By default this is other. By specifying a more
+ specific OS type, VMware may perform some optimizations or virtual hardware
+ changes to better support the operating system running in the
+ virtual machine.
+
+- `version` (string) - The vmx hardware
+ version
+ for the new virtual machine. Only the default value has been tested, any
+ other value is experimental. Default value is 9.
+
+- `vm_name` (string) - This is the name of the VMX file for the new virtual
+ machine, without the file extension. By default this is packer-BUILDNAME,
+ where "BUILDNAME" is the name of the build.
+
+- `vmx_disk_template_path` (string) - VMX Disk Template Path
+- `vmx_template_path` (string) - Path to a configuration
+ template that defines the
+ contents of the virtual machine VMX file for VMware. This is for advanced
+ users only as this can render the virtual machine non-functional. See
+ below for more information. For basic VMX modifications, try
+ vmx_data first.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/vmx/_Config-not-required.html.md b/website/source/partials/builder/vmware/vmx/_Config-not-required.html.md
new file mode 100644
index 000000000..1665c6896
--- /dev/null
+++ b/website/source/partials/builder/vmware/vmx/_Config-not-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `linked` (bool) - By default Packer creates a 'full' clone of
+  the virtual machine specified in source_path. The resultant virtual
+  machine is fully independent from the parent it was cloned from.
+
+- `remote_type` (string) - The type of remote machine that will be used to
+ build this VM rather than a local desktop product. The only value accepted
+ for this currently is esx5. If this is not set, a desktop product will
+ be used. By default, this is not set.
+
+- `vm_name` (string) - This is the name of the VMX file for the new virtual
+ machine, without the file extension. By default this is packer-BUILDNAME,
+ where "BUILDNAME" is the name of the build.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/vmware/vmx/_Config-required.html.md b/website/source/partials/builder/vmware/vmx/_Config-required.html.md
new file mode 100644
index 000000000..ddeb7ebbe
--- /dev/null
+++ b/website/source/partials/builder/vmware/vmx/_Config-required.html.md
@@ -0,0 +1,5 @@
+
+
+- `source_path` (string) - Path to the source VMX file to clone. If
+ remote_type is enabled then this specifies a path on the remote_host.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/yandex/_Config-not-required.html.md b/website/source/partials/builder/yandex/_Config-not-required.html.md
new file mode 100644
index 000000000..2deb12bba
--- /dev/null
+++ b/website/source/partials/builder/yandex/_Config-not-required.html.md
@@ -0,0 +1,67 @@
+
+
+- `endpoint` (string) - Non-standard API endpoint URL.
+
+- `service_account_key_file` (string) - Path to file with Service Account key in json format. This
+ is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
+ YC_SERVICE_ACCOUNT_KEY_FILE.
+
+- `disk_name` (string) - The name of the disk, if unset the instance name
+ will be used.
+
+- `disk_size_gb` (int) - The size of the disk in GB. This defaults to 10, which is 10GB.
+
+- `disk_type` (string) - Specify disk type for the launched instance. Defaults to network-hdd.
+
+- `image_description` (string) - The description of the resulting image.
+
+- `image_family` (string) - The family name of the resulting image.
+
+- `image_labels` (map[string]string) - Key/value pair labels to
+ apply to the created image.
+
+- `image_name` (string) - The unique name of the resulting image. Defaults to
+ packer-{{timestamp}}.
+
+- `image_product_ids` ([]string) - License IDs that indicate which licenses are attached to resulting image.
+
+- `instance_cores` (int) - The number of cores available to the instance.
+
+- `instance_mem_gb` (int) - The amount of memory available to the instance, specified in gigabytes.
+
+- `instance_name` (string) - The name assigned to the instance.
+
+- `labels` (map[string]string) - Key/value pair labels to apply to
+ the launched instance.
+
+- `platform_id` (string) - Identifier of the hardware platform configuration for the instance. This defaults to standard-v1.
+
+- `metadata` (map[string]string) - Metadata applied to the launched
+ instance.
+
+- `serial_log_file` (string) - File path to save serial port output of the launched instance.
+
+- `source_image_folder_id` (string) - The ID of the folder containing the source image.
+
+- `source_image_id` (string) - The source image ID to use to create the new image
+ from.
+
+- `subnet_id` (string) - The Yandex VPC subnet id to use for
+ the launched instance. Note, the zone of the subnet must match the
+ zone in which the VM is launched.
+
+- `use_ipv4_nat` (bool) - If set to true, then launched instance will have external internet
+ access.
+
+- `use_ipv6` (bool) - Set to true to enable IPv6 for the instance being
+  created. This defaults to false, or not enabled.
+  ~> **Note**: Usage of IPv6 will be available in the future.
+
+- `use_internal_ip` (bool) - If true, use the instance's internal IP address
+ instead of its external IP during building.
+
+- `zone` (string) - The name of the zone to launch the instance. This defaults to ru-central1-a.
+
+- `state_timeout` (time.Duration) - The time to wait for instance state changes.
+ Defaults to 5m.
+
\ No newline at end of file
diff --git a/website/source/partials/builder/yandex/_Config-required.html.md b/website/source/partials/builder/yandex/_Config-required.html.md
new file mode 100644
index 000000000..9d65be8ae
--- /dev/null
+++ b/website/source/partials/builder/yandex/_Config-required.html.md
@@ -0,0 +1,12 @@
+
+
+- `folder_id` (string) - The folder ID that will be used to launch instances and store images.
+ Alternatively you may set value by environment variable YC_FOLDER_ID.
+
+- `token` (string) - OAuth token to use to authenticate to Yandex.Cloud. Alternatively you may set
+ value by environment variable YC_TOKEN.
+
+- `source_image_family` (string) - The source image family to create the new image
+ from. You can also specify source_image_id instead. Just one of a source_image_id or
+ source_image_family must be specified. Example: ubuntu-1804-lts
+
\ No newline at end of file
diff --git a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
index 4725661cd..8c04d4abb 100644
--- a/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
+++ b/website/source/partials/builders/_building_on_remote_vsphere_hypervisor.html.md
@@ -30,35 +30,37 @@ Packer builds on; a vMotion event will cause the Packer build to fail.
To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in
the required `remote_*` configurations:
-### Required:
+- `remote_type` - This must be set to "esx5".
-- `remote_type` (string) - This must be set to "esx5".
+- `remote_host` - The host of the remote machine.
-- `remote_host` (string) - The host of the remote machine.
+Additionally, there are some optional configurations that you'll likely have to
+modify as well:
-### Optional:
+- `remote_port` - The SSH port of the remote machine
-- `remote_port` (int) - The SSH port of the remote machine
-
-- `remote_datastore` (string) - The path to the datastore where the VM will be stored
+- `remote_datastore` - The path to the datastore where the VM will be stored
on the ESXi machine.
-- `remote_cache_datastore` (string) - The path to the datastore where supporting files
+- `remote_cache_datastore` - The path to the datastore where supporting files
will be stored during the build on the remote machine.
-- `remote_cache_directory` (string) - The path where the ISO and/or floppy files will
+- `remote_cache_directory` - The path where the ISO and/or floppy files will
be stored during the build on the remote machine. The path is relative to
the `remote_cache_datastore` on the remote machine.
-- `remote_username` (string) - The SSH username used to access the remote machine.
+- `remote_username` - The SSH username used to access the remote machine.
-- `remote_password` (string) - The SSH password for access to the remote machine.
+- `remote_password` - The SSH password for access to the remote machine.
-- `remote_private_key_file` (string) - The SSH key for access to the remote machine.
+- `remote_private_key_file` - The SSH key for access to the remote machine.
-- `format` (string) (string) - Either "ovf", "ova" or "vmx", this specifies the output
+- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output
format of the exported virtual machine. This defaults to "ovf".
Before using this option, you need to install `ovftool`. This option
currently only works when option remote_type is set to "esx5".
Since ovftool is only capable of password based authentication
`remote_password` must be set when exporting the VM.
+
+- `vnc_disable_password` - This must be set to "true" when using VNC with
+ ESXi 6.5 or 6.7.
\ No newline at end of file
diff --git a/website/source/partials/helper/communicator/_Config-not-required.html.md b/website/source/partials/helper/communicator/_Config-not-required.html.md
new file mode 100644
index 000000000..e994c8fe1
--- /dev/null
+++ b/website/source/partials/helper/communicator/_Config-not-required.html.md
@@ -0,0 +1,36 @@
+
+
+- `communicator` (string) - Packer currently supports three kinds of communicators:
+
+ - `none` - No communicator will be used. If this is set, most
+ provisioners also can't be used.
+
+ - `ssh` - An SSH connection will be established to the machine. This
+ is usually the default.
+
+ - `winrm` - A WinRM connection will be established.
+
+ In addition to the above, some builders have custom communicators they
+ can use. For example, the Docker builder has a "docker" communicator
+ that uses `docker exec` and `docker cp` to execute scripts and copy
+ files.
+
+- `pause_before_connecting` (time.Duration) - We recommend that you enable SSH or WinRM as the very last step in your
+ guest's bootstrap script, but sometimes you may have a race condition where
+ you need Packer to wait before attempting to connect to your guest.
+
+ If you end up in this situation, you can use the template option
+ `pause_before_connecting`. By default, there is no pause. For example:
+
+ ```json
+ {
+ "communicator": "ssh",
+ "ssh_username": "myuser",
+ "pause_before_connecting": "10m"
+ }
+ ```
+
+ In this example, Packer will check whether it can connect, as normal. But once
+ a connection attempt is successful, it will disconnect and then wait 10 minutes
+ before connecting to the guest and beginning provisioning.
+
\ No newline at end of file
diff --git a/website/source/partials/helper/communicator/_SSH-not-required.html.md b/website/source/partials/helper/communicator/_SSH-not-required.html.md
new file mode 100644
index 000000000..aa63682f1
--- /dev/null
+++ b/website/source/partials/helper/communicator/_SSH-not-required.html.md
@@ -0,0 +1,98 @@
+
+
+- `ssh_host` (string) - The address to SSH to. This usually is automatically configured by the
+ builder.
+
+- `ssh_port` (int) - The port to connect to SSH. This defaults to `22`.
+
+- `ssh_username` (string) - The username to connect to SSH with. Required if using SSH.
+
+- `ssh_password` (string) - A plaintext password to use to authenticate with SSH.
+
+- `ssh_keypair_name` (string) - If specified, this is the key that will be used for SSH with the
+ machine. The key must match a key pair name loaded up into Amazon EC2.
+ By default, this is blank, and Packer will generate a temporary keypair
+ unless [`ssh_password`](../templates/communicator.html#ssh_password) is
+ used.
+ [`ssh_private_key_file`](../templates/communicator.html#ssh_private_key_file)
+ or `ssh_agent_auth` must be specified when `ssh_keypair_name` is
+ utilized.
+
+- `temporary_key_pair_name` (string) - SSH Temporary Key Pair Name
+- `ssh_clear_authorized_keys` (bool) - If true, Packer will attempt to remove its temporary key from
+ `~/.ssh/authorized_keys` and `/root/.ssh/authorized_keys`. This is a
+ mostly cosmetic option, since Packer will delete the temporary private
+ key from the host system regardless of whether this is set to true
+ (unless the user has set the `-debug` flag). Defaults to "false";
+ currently only works on guests with `sed` installed.
+
+- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authenticate with SSH.
+ The `~` can be used in path and will be expanded to the home directory
+ of current user.
+
+- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns`, or `private_dns`. If
+ set, either the public IP address, private IP address, public DNS name
+  or private DNS name will be used as the host for SSH. The default behaviour
+ if inside a VPC is to use the public IP address if available, otherwise
+ the private IP address will be used. If not in a VPC the public DNS name
+ will be used. Also works for WinRM.
+
+ Where Packer is configured for an outbound proxy but WinRM traffic
+ should be direct, `ssh_interface` must be set to `private_dns` and
+ `.compute.internal` included in the `NO_PROXY` environment
+ variable.
+
+- `ssh_ip_version` (string) - SSH IP Version
+- `ssh_pty` (bool) - If `true`, a PTY will be requested for the SSH connection. This defaults
+ to `false`.
+
+- `ssh_timeout` (time.Duration) - The time to wait for SSH to become available. Packer uses this to
+ determine when the machine has booted so this is usually quite long.
+ Example value: `10m`.
+
+- `ssh_agent_auth` (bool) - If true, the local SSH agent will be used to authenticate connections to
+ the source instance. No temporary keypair will be created, and the
+ values of `ssh_password` and `ssh_private_key_file` will be ignored. To
+ use this option with a key pair already configured in the source AMI,
+ leave the `ssh_keypair_name` blank. To associate an existing key pair in
+ AWS with the source instance, set the `ssh_keypair_name` field to the
+ name of the key pair.
+
+- `ssh_disable_agent_forwarding` (bool) - If true, SSH agent forwarding will be disabled. Defaults to `false`.
+
+- `ssh_handshake_attempts` (int) - The number of handshakes to attempt with SSH once it can connect. This
+ defaults to `10`.
+
+- `ssh_bastion_host` (string) - A bastion host to use for the actual SSH connection.
+
+- `ssh_bastion_port` (int) - The port of the bastion host. Defaults to `22`.
+
+- `ssh_bastion_agent_auth` (bool) - If `true`, the local SSH agent will be used to authenticate with the
+ bastion host. Defaults to `false`.
+
+- `ssh_bastion_username` (string) - The username to connect to the bastion host.
+
+- `ssh_bastion_password` (string) - The password to use to authenticate with the bastion host.
+
+- `ssh_bastion_private_key_file` (string) - Path to a PEM encoded private key file to use to authenticate with the
+ bastion host. The `~` can be used in path and will be expanded to the
+ home directory of current user.
+
+- `ssh_file_transfer_method` (string) - `scp` or `sftp` - How to transfer files, Secure copy (default) or SSH
+ File Transfer Protocol.
+
+- `ssh_proxy_host` (string) - A SOCKS proxy host to use for SSH connection
+
+- `ssh_proxy_port` (int) - A port of the SOCKS proxy. Defaults to `1080`.
+
+- `ssh_proxy_username` (string) - The optional username to authenticate with the proxy server.
+
+- `ssh_proxy_password` (string) - The optional password to use to authenticate with the proxy server.
+
+- `ssh_keep_alive_interval` (time.Duration) - How often to send "keep alive" messages to the server. Set to a negative
+ value (`-1s`) to disable. Example value: `10s`. Defaults to `5s`.
+
+- `ssh_read_write_timeout` (time.Duration) - The amount of time to wait for a remote command to end. This might be
+ useful if, for example, packer hangs on a connection after a reboot.
+ Example: `5m`. Disabled by default.
+
\ No newline at end of file
diff --git a/website/source/partials/helper/communicator/_WinRM-not-required.html.md b/website/source/partials/helper/communicator/_WinRM-not-required.html.md
new file mode 100644
index 000000000..aac408a0b
--- /dev/null
+++ b/website/source/partials/helper/communicator/_WinRM-not-required.html.md
@@ -0,0 +1,29 @@
+
+
+- `winrm_username` (string) - The username to use to connect to WinRM.
+
+- `winrm_password` (string) - The password to use to connect to WinRM.
+
+- `winrm_host` (string) - The address for WinRM to connect to.
+
+ NOTE: If using an Amazon EBS builder, you can specify the interface
+ WinRM connects to via
+ [`ssh_interface`](https://www.packer.io/docs/builders/amazon-ebs.html#ssh_interface)
+
+- `winrm_port` (int) - The WinRM port to connect to. This defaults to `5985` for plain
+ unencrypted connection and `5986` for SSL when `winrm_use_ssl` is set to
+ true.
+
+- `winrm_timeout` (time.Duration) - The amount of time to wait for WinRM to become available. This defaults
+ to `30m` since setting up a Windows machine generally takes a long time.
+
+- `winrm_use_ssl` (bool) - If `true`, use HTTPS for WinRM.
+
+- `winrm_insecure` (bool) - If `true`, do not check server certificate chain and host name.
+
+- `winrm_use_ntlm` (bool) - If `true`, NTLMv2 authentication (with session security) will be used
+ for WinRM, rather than default (basic authentication), removing the
+ requirement for basic authentication to be enabled within the target
+ guest. Further reading for remote connection authentication can be found
+ [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx).
+
\ No newline at end of file
From ee716d3f7ee28de8758ae753002bf2c90ff8a691 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:40:39 +0200
Subject: [PATCH 27/97] up go mod, go mod vendor & go mod tidy
---
go.mod | 20 +-
go.sum | 74 +-
.../PuerkitoBio/goquery/.gitattributes | 1 +
.../github.com/PuerkitoBio/goquery/.gitignore | 16 +
.../PuerkitoBio/goquery/.travis.yml | 16 +
vendor/github.com/PuerkitoBio/goquery/LICENSE | 12 +
.../github.com/PuerkitoBio/goquery/README.md | 179 +++
.../github.com/PuerkitoBio/goquery/array.go | 124 ++
vendor/github.com/PuerkitoBio/goquery/doc.go | 123 ++
.../github.com/PuerkitoBio/goquery/expand.go | 70 +
.../github.com/PuerkitoBio/goquery/filter.go | 163 +++
vendor/github.com/PuerkitoBio/goquery/go.mod | 6 +
vendor/github.com/PuerkitoBio/goquery/go.sum | 5 +
.../PuerkitoBio/goquery/iteration.go | 39 +
.../PuerkitoBio/goquery/manipulation.go | 574 ++++++++
.../PuerkitoBio/goquery/property.go | 275 ++++
.../github.com/PuerkitoBio/goquery/query.go | 49 +
.../PuerkitoBio/goquery/traversal.go | 698 +++++++++
vendor/github.com/PuerkitoBio/goquery/type.go | 141 ++
.../PuerkitoBio/goquery/utilities.go | 161 ++
.../andybalholm/cascadia/.travis.yml | 14 +
.../github.com/andybalholm/cascadia/LICENSE | 24 +
.../github.com/andybalholm/cascadia/README.md | 7 +
vendor/github.com/andybalholm/cascadia/go.mod | 3 +
.../github.com/andybalholm/cascadia/parser.go | 835 +++++++++++
.../andybalholm/cascadia/selector.go | 622 ++++++++
.../github.com/antchfx/htmlquery/.gitignore | 32 +
.../github.com/antchfx/htmlquery/.travis.yml | 15 +
vendor/github.com/antchfx/htmlquery/LICENSE | 17 +
vendor/github.com/antchfx/htmlquery/README.md | 102 ++
vendor/github.com/antchfx/htmlquery/query.go | 291 ++++
vendor/github.com/antchfx/xmlquery/.gitignore | 32 +
.../github.com/antchfx/xmlquery/.travis.yml | 14 +
vendor/github.com/antchfx/xmlquery/LICENSE | 17 +
vendor/github.com/antchfx/xmlquery/README.md | 186 +++
vendor/github.com/antchfx/xmlquery/books.xml | 121 ++
vendor/github.com/antchfx/xmlquery/node.go | 302 ++++
vendor/github.com/antchfx/xmlquery/query.go | 264 ++++
vendor/github.com/fatih/camelcase/.travis.yml | 3 +
vendor/github.com/fatih/camelcase/LICENSE.md | 20 +
vendor/github.com/fatih/camelcase/README.md | 58 +
.../github.com/fatih/camelcase/camelcase.go | 90 ++
vendor/github.com/fatih/structtag/.travis.yml | 4 +
vendor/github.com/fatih/structtag/LICENSE | 60 +
vendor/github.com/fatih/structtag/README.md | 73 +
vendor/github.com/fatih/structtag/tags.go | 309 ++++
vendor/github.com/gobwas/glob/.gitignore | 8 +
vendor/github.com/gobwas/glob/.travis.yml | 9 +
vendor/github.com/gobwas/glob/LICENSE | 21 +
vendor/github.com/gobwas/glob/bench.sh | 26 +
.../gobwas/glob/compiler/compiler.go | 525 +++++++
vendor/github.com/gobwas/glob/glob.go | 80 +
vendor/github.com/gobwas/glob/match/any.go | 45 +
vendor/github.com/gobwas/glob/match/any_of.go | 82 ++
vendor/github.com/gobwas/glob/match/btree.go | 146 ++
.../github.com/gobwas/glob/match/contains.go | 58 +
.../github.com/gobwas/glob/match/every_of.go | 99 ++
vendor/github.com/gobwas/glob/match/list.go | 49 +
vendor/github.com/gobwas/glob/match/match.go | 81 ++
vendor/github.com/gobwas/glob/match/max.go | 49 +
vendor/github.com/gobwas/glob/match/min.go | 57 +
.../github.com/gobwas/glob/match/nothing.go | 27 +
vendor/github.com/gobwas/glob/match/prefix.go | 50 +
.../gobwas/glob/match/prefix_any.go | 55 +
.../gobwas/glob/match/prefix_suffix.go | 62 +
vendor/github.com/gobwas/glob/match/range.go | 48 +
vendor/github.com/gobwas/glob/match/row.go | 77 +
.../github.com/gobwas/glob/match/segments.go | 91 ++
vendor/github.com/gobwas/glob/match/single.go | 43 +
vendor/github.com/gobwas/glob/match/suffix.go | 35 +
.../gobwas/glob/match/suffix_any.go | 43 +
vendor/github.com/gobwas/glob/match/super.go | 33 +
vendor/github.com/gobwas/glob/match/text.go | 45 +
vendor/github.com/gobwas/glob/readme.md | 148 ++
.../github.com/gobwas/glob/syntax/ast/ast.go | 122 ++
.../gobwas/glob/syntax/ast/parser.go | 157 ++
.../gobwas/glob/syntax/lexer/lexer.go | 273 ++++
.../gobwas/glob/syntax/lexer/token.go | 88 ++
.../github.com/gobwas/glob/syntax/syntax.go | 14 +
.../gobwas/glob/util/runes/runes.go | 154 ++
.../gobwas/glob/util/strings/strings.go | 39 +
vendor/github.com/gocolly/colly/.codecov.yml | 1 +
vendor/github.com/gocolly/colly/.travis.yml | 17 +
vendor/github.com/gocolly/colly/CHANGELOG.md | 23 +
.../github.com/gocolly/colly/CONTRIBUTING.md | 67 +
vendor/github.com/gocolly/colly/LICENSE.txt | 202 +++
vendor/github.com/gocolly/colly/README.md | 112 ++
vendor/github.com/gocolly/colly/VERSION | 1 +
vendor/github.com/gocolly/colly/colly.go | 1293 +++++++++++++++++
vendor/github.com/gocolly/colly/context.go | 87 ++
.../github.com/gocolly/colly/debug/debug.go | 36 +
.../gocolly/colly/debug/logdebugger.go | 54 +
.../gocolly/colly/debug/webdebugger.go | 146 ++
.../github.com/gocolly/colly/htmlelement.go | 120 ++
.../github.com/gocolly/colly/http_backend.go | 227 +++
vendor/github.com/gocolly/colly/request.go | 180 +++
vendor/github.com/gocolly/colly/response.go | 99 ++
.../gocolly/colly/storage/storage.go | 128 ++
vendor/github.com/gocolly/colly/unmarshal.go | 171 +++
vendor/github.com/gocolly/colly/xmlelement.go | 170 +++
.../github.com/kennygrant/sanitize/.gitignore | 22 +
.../kennygrant/sanitize/.travis.yml | 1 +
vendor/github.com/kennygrant/sanitize/LICENSE | 27 +
.../github.com/kennygrant/sanitize/README.md | 62 +
.../kennygrant/sanitize/sanitize.go | 388 +++++
vendor/github.com/saintfish/chardet/2022.go | 102 ++
vendor/github.com/saintfish/chardet/AUTHORS | 1 +
vendor/github.com/saintfish/chardet/LICENSE | 22 +
vendor/github.com/saintfish/chardet/README.md | 10 +
.../github.com/saintfish/chardet/detector.go | 136 ++
.../saintfish/chardet/icu-license.html | 51 +
.../saintfish/chardet/multi_byte.go | 345 +++++
.../saintfish/chardet/recognizer.go | 83 ++
.../saintfish/chardet/single_byte.go | 882 +++++++++++
.../github.com/saintfish/chardet/unicode.go | 103 ++
vendor/github.com/saintfish/chardet/utf8.go | 71 +
vendor/github.com/temoto/robotstxt/.gitignore | 9 +
.../github.com/temoto/robotstxt/.travis.yml | 30 +
vendor/github.com/temoto/robotstxt/LICENSE | 21 +
vendor/github.com/temoto/robotstxt/README.rst | 112 ++
.../github.com/temoto/robotstxt/codecov.yml | 2 +
.../temoto/robotstxt/metalinter.json | 9 +
vendor/github.com/temoto/robotstxt/parser.go | 266 ++++
.../github.com/temoto/robotstxt/robotstxt.go | 231 +++
vendor/github.com/temoto/robotstxt/scanner.go | 205 +++
vendor/modules.txt | 33 +-
126 files changed, 15500 insertions(+), 58 deletions(-)
create mode 100644 vendor/github.com/PuerkitoBio/goquery/.gitattributes
create mode 100644 vendor/github.com/PuerkitoBio/goquery/.gitignore
create mode 100644 vendor/github.com/PuerkitoBio/goquery/.travis.yml
create mode 100644 vendor/github.com/PuerkitoBio/goquery/LICENSE
create mode 100644 vendor/github.com/PuerkitoBio/goquery/README.md
create mode 100644 vendor/github.com/PuerkitoBio/goquery/array.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/doc.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/expand.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/filter.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/go.mod
create mode 100644 vendor/github.com/PuerkitoBio/goquery/go.sum
create mode 100644 vendor/github.com/PuerkitoBio/goquery/iteration.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/manipulation.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/property.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/query.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/traversal.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/type.go
create mode 100644 vendor/github.com/PuerkitoBio/goquery/utilities.go
create mode 100644 vendor/github.com/andybalholm/cascadia/.travis.yml
create mode 100644 vendor/github.com/andybalholm/cascadia/LICENSE
create mode 100644 vendor/github.com/andybalholm/cascadia/README.md
create mode 100644 vendor/github.com/andybalholm/cascadia/go.mod
create mode 100644 vendor/github.com/andybalholm/cascadia/parser.go
create mode 100644 vendor/github.com/andybalholm/cascadia/selector.go
create mode 100644 vendor/github.com/antchfx/htmlquery/.gitignore
create mode 100644 vendor/github.com/antchfx/htmlquery/.travis.yml
create mode 100644 vendor/github.com/antchfx/htmlquery/LICENSE
create mode 100644 vendor/github.com/antchfx/htmlquery/README.md
create mode 100644 vendor/github.com/antchfx/htmlquery/query.go
create mode 100644 vendor/github.com/antchfx/xmlquery/.gitignore
create mode 100644 vendor/github.com/antchfx/xmlquery/.travis.yml
create mode 100644 vendor/github.com/antchfx/xmlquery/LICENSE
create mode 100644 vendor/github.com/antchfx/xmlquery/README.md
create mode 100644 vendor/github.com/antchfx/xmlquery/books.xml
create mode 100644 vendor/github.com/antchfx/xmlquery/node.go
create mode 100644 vendor/github.com/antchfx/xmlquery/query.go
create mode 100644 vendor/github.com/fatih/camelcase/.travis.yml
create mode 100644 vendor/github.com/fatih/camelcase/LICENSE.md
create mode 100644 vendor/github.com/fatih/camelcase/README.md
create mode 100644 vendor/github.com/fatih/camelcase/camelcase.go
create mode 100644 vendor/github.com/fatih/structtag/.travis.yml
create mode 100644 vendor/github.com/fatih/structtag/LICENSE
create mode 100644 vendor/github.com/fatih/structtag/README.md
create mode 100644 vendor/github.com/fatih/structtag/tags.go
create mode 100644 vendor/github.com/gobwas/glob/.gitignore
create mode 100644 vendor/github.com/gobwas/glob/.travis.yml
create mode 100644 vendor/github.com/gobwas/glob/LICENSE
create mode 100644 vendor/github.com/gobwas/glob/bench.sh
create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go
create mode 100644 vendor/github.com/gobwas/glob/glob.go
create mode 100644 vendor/github.com/gobwas/glob/match/any.go
create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go
create mode 100644 vendor/github.com/gobwas/glob/match/btree.go
create mode 100644 vendor/github.com/gobwas/glob/match/contains.go
create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go
create mode 100644 vendor/github.com/gobwas/glob/match/list.go
create mode 100644 vendor/github.com/gobwas/glob/match/match.go
create mode 100644 vendor/github.com/gobwas/glob/match/max.go
create mode 100644 vendor/github.com/gobwas/glob/match/min.go
create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go
create mode 100644 vendor/github.com/gobwas/glob/match/range.go
create mode 100644 vendor/github.com/gobwas/glob/match/row.go
create mode 100644 vendor/github.com/gobwas/glob/match/segments.go
create mode 100644 vendor/github.com/gobwas/glob/match/single.go
create mode 100644 vendor/github.com/gobwas/glob/match/suffix.go
create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go
create mode 100644 vendor/github.com/gobwas/glob/match/super.go
create mode 100644 vendor/github.com/gobwas/glob/match/text.go
create mode 100644 vendor/github.com/gobwas/glob/readme.md
create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go
create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go
create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go
create mode 100644 vendor/github.com/gocolly/colly/.codecov.yml
create mode 100644 vendor/github.com/gocolly/colly/.travis.yml
create mode 100644 vendor/github.com/gocolly/colly/CHANGELOG.md
create mode 100644 vendor/github.com/gocolly/colly/CONTRIBUTING.md
create mode 100644 vendor/github.com/gocolly/colly/LICENSE.txt
create mode 100644 vendor/github.com/gocolly/colly/README.md
create mode 100644 vendor/github.com/gocolly/colly/VERSION
create mode 100644 vendor/github.com/gocolly/colly/colly.go
create mode 100644 vendor/github.com/gocolly/colly/context.go
create mode 100644 vendor/github.com/gocolly/colly/debug/debug.go
create mode 100644 vendor/github.com/gocolly/colly/debug/logdebugger.go
create mode 100644 vendor/github.com/gocolly/colly/debug/webdebugger.go
create mode 100644 vendor/github.com/gocolly/colly/htmlelement.go
create mode 100644 vendor/github.com/gocolly/colly/http_backend.go
create mode 100644 vendor/github.com/gocolly/colly/request.go
create mode 100644 vendor/github.com/gocolly/colly/response.go
create mode 100644 vendor/github.com/gocolly/colly/storage/storage.go
create mode 100644 vendor/github.com/gocolly/colly/unmarshal.go
create mode 100644 vendor/github.com/gocolly/colly/xmlelement.go
create mode 100644 vendor/github.com/kennygrant/sanitize/.gitignore
create mode 100644 vendor/github.com/kennygrant/sanitize/.travis.yml
create mode 100644 vendor/github.com/kennygrant/sanitize/LICENSE
create mode 100644 vendor/github.com/kennygrant/sanitize/README.md
create mode 100644 vendor/github.com/kennygrant/sanitize/sanitize.go
create mode 100644 vendor/github.com/saintfish/chardet/2022.go
create mode 100644 vendor/github.com/saintfish/chardet/AUTHORS
create mode 100644 vendor/github.com/saintfish/chardet/LICENSE
create mode 100644 vendor/github.com/saintfish/chardet/README.md
create mode 100644 vendor/github.com/saintfish/chardet/detector.go
create mode 100644 vendor/github.com/saintfish/chardet/icu-license.html
create mode 100644 vendor/github.com/saintfish/chardet/multi_byte.go
create mode 100644 vendor/github.com/saintfish/chardet/recognizer.go
create mode 100644 vendor/github.com/saintfish/chardet/single_byte.go
create mode 100644 vendor/github.com/saintfish/chardet/unicode.go
create mode 100644 vendor/github.com/saintfish/chardet/utf8.go
create mode 100644 vendor/github.com/temoto/robotstxt/.gitignore
create mode 100644 vendor/github.com/temoto/robotstxt/.travis.yml
create mode 100644 vendor/github.com/temoto/robotstxt/LICENSE
create mode 100644 vendor/github.com/temoto/robotstxt/README.rst
create mode 100644 vendor/github.com/temoto/robotstxt/codecov.yml
create mode 100644 vendor/github.com/temoto/robotstxt/metalinter.json
create mode 100644 vendor/github.com/temoto/robotstxt/parser.go
create mode 100644 vendor/github.com/temoto/robotstxt/robotstxt.go
create mode 100644 vendor/github.com/temoto/robotstxt/scanner.go
diff --git a/go.mod b/go.mod
index 74e6a706d..b36bd367f 100644
--- a/go.mod
+++ b/go.mod
@@ -8,14 +8,16 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 // indirect
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591
+ github.com/PuerkitoBio/goquery v1.5.0 // indirect
github.com/Telmate/proxmox-api-go v0.0.0-20190410200643-f08824d5082d
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f
+ github.com/antchfx/htmlquery v1.0.0 // indirect
+ github.com/antchfx/xmlquery v1.0.0 // indirect
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd // indirect
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 // indirect
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 // indirect
- github.com/apache/thrift v0.12.0 // indirect
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.16.24
@@ -29,8 +31,13 @@ require (
github.com/docker/docker v0.0.0-20180422163414-57142e89befe // indirect
github.com/dylanmei/iso8601 v0.1.0 // indirect
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
+ github.com/fatih/camelcase v1.0.0
+ github.com/fatih/structtag v1.0.0
github.com/go-ini/ini v1.25.4
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/gocolly/colly v1.2.0
github.com/gofrs/flock v0.7.1
+ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/google/go-cmp v0.2.0
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
github.com/google/uuid v1.0.0
@@ -59,6 +66,7 @@ require (
github.com/json-iterator/go v1.1.6 // indirect
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1
+ github.com/kennygrant/sanitize v1.2.4 // indirect
github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77 // indirect
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817 // indirect
github.com/klauspost/crc32 v0.0.0-20160114101742-999f3125931f // indirect
@@ -66,7 +74,6 @@ require (
github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 // indirect
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 // indirect
github.com/linode/linodego v0.7.1
- github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c // indirect
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c // indirect
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect
github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939
@@ -81,7 +88,6 @@ require (
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784
github.com/mitchellh/reflectwalk v1.0.0
- github.com/mna/pigeon v1.0.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/moul/anonuuid v0.0.0-20160222162117-609b752a95ef // indirect
@@ -89,7 +95,8 @@ require (
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 // indirect
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b
- github.com/openzipkin/zipkin-go v0.1.6 // indirect
+ github.com/onsi/ginkgo v1.7.0 // indirect
+ github.com/onsi/gomega v1.4.3 // indirect
github.com/oracle/oci-go-sdk v1.8.0
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a
github.com/pierrec/lz4 v2.0.5+incompatible
@@ -97,15 +104,17 @@ require (
github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca
github.com/posener/complete v1.1.1
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
- github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 // indirect
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 // indirect
github.com/rwtodd/Go.Sed v0.0.0-20170507045331-d6d5d585814e
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect
+ github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70
+ github.com/sirupsen/logrus v1.2.0 // indirect
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
github.com/stretchr/testify v1.3.0
+ github.com/temoto/robotstxt v0.0.0-20180810133444-97ee4a9ee6ea // indirect
github.com/tencentcloud/tencentcloud-sdk-go v0.0.0-20181220135002-f1744d40d346
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1
github.com/ulikunitz/xz v0.5.5
@@ -119,7 +128,6 @@ require (
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190425145619-16072639606e
golang.org/x/text v0.3.1 // indirect
- golang.org/x/tools v0.0.0-20190530184349-ce1a3806b557 // indirect
google.golang.org/api v0.4.0
google.golang.org/grpc v1.20.1
gopkg.in/h2non/gock.v1 v1.0.12 // indirect
diff --git a/go.sum b/go.sum
index a844f081d..2188154be 100644
--- a/go.sum
+++ b/go.sum
@@ -3,8 +3,6 @@ cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
@@ -14,16 +12,10 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1 h1:RMTyvS5bjvSWiUcfqfr/E2pxHEMrALvU+E12n6biymg=
github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1/go.mod h1:61apmbkVJH4kg+38ftT+/l0XxdUCVnHggqcOTqZRSEE=
-github.com/Azure/azure-sdk-for-go v27.3.0+incompatible h1:i+ROfG3CsZUPoVAnhK06T3R6PmBzKB9ds+lHBpN7Mzo=
-github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v30.0.0+incompatible h1:6o1Yzl7wTBYg+xw0pY4qnalaPmEQolubEEdepo1/kmI=
github.com/Azure/azure-sdk-for-go v30.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest v10.12.0+incompatible h1:6YphwUK+oXbzvCc1fd5VrnxCekwzDkpA7gUEbci2MvI=
-github.com/Azure/go-autorest v10.12.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc=
-github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=
github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -31,26 +23,29 @@ github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 h1:K9I21XUHN
github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591 h1:/P9HCl71+Eh6vDbKNyRu+rpIIR70UCZWNOGexVV3e6k=
github.com/NaverCloudPlatform/ncloud-sdk-go v0.0.0-20180110055012-c2e73f942591/go.mod h1:EHGzQGbwozJBj/4qj3WGrTJ0FqjgOTOxLQ0VNWvPn08=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
+github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/Telmate/proxmox-api-go v0.0.0-20190410200643-f08824d5082d h1:igrCnHheXb+lZ1bW9Ths8JZZIjh9D4Vi/49JqiHE+cI=
github.com/Telmate/proxmox-api-go v0.0.0-20190410200643-f08824d5082d/go.mod h1:OGWyIMJ87/k/GCz8CGiWB2HOXsOVDM6Lpe/nFPkC4IQ=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e h1:/8wOj52pewmIX/8d5eVO3t7Rr3astkBI/ruyg4WNqRo=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f h1:jI4DIE5Vf4oRaHfthB0oRhU+yuYuoOTurDzwAlskP00=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antchfx/htmlquery v1.0.0 h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=
+github.com/antchfx/htmlquery v1.0.0/go.mod h1:MS9yksVSQXls00iXkiMqXr0J+umL/AmxXKuP28SUJM8=
+github.com/antchfx/xmlquery v1.0.0 h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=
+github.com/antchfx/xmlquery v1.0.0/go.mod h1:/+CnyD/DzHRnv2eRxrVbieRU/FIF6N0C+7oTtyUtCKk=
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd h1:S3Fr6QnkpW9VRjiEY4psQHhhbbahASuNVj52YIce7lI=
github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607 h1:BFFG6KP8ASFBg2ptWsJn8p8RDufBjBDKIxLU7BTYGOM=
github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43 h1:ePCAQPf5tUc5IMcUvu6euhSGna7jzs7eiXtJXHig6Zc=
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43/go.mod h1:S6puKjZ9ZeqUPBv2hEBnMZGcM2J6mOsDRQcmxkMAND0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -97,11 +92,12 @@ github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURU
github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08 h1:0bp6/GrNOrTDtSXe9YYGCwf8jp5Fb/b+4a6MTRm4qzY=
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08/go.mod h1:VBVDFSBXCIW8JaHQpI8lldSKfYaLMzP9oyq6IJ4fhzY=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
+github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/structtag v1.0.0 h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc=
+github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -110,13 +106,13 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gocolly/colly v1.2.0 h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI=
+github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA=
github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
@@ -153,8 +149,6 @@ github.com/gophercloud/utils v0.0.0-20190124192022-a5c25e7a53a6/go.mod h1:wjDF8z
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777 h1:JIM+OacoOJRU30xpjMf8sulYqjr0ViA3WDrTX6j/yDI=
github.com/gorilla/websocket v0.0.0-20170319172727-a91eba7f9777/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -226,9 +220,10 @@ github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwK
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
+github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77 h1:rJnR80lkojFgjdg/oQPhbZoY8t8uM51XMz8DrJrjabk=
github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
@@ -242,7 +237,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 h1:YUrU1/jxRqnt0PSrKj1Uj/wEjk/fjnE80QFfi2Zlj7Q=
github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169/go.mod h1:glhvuHOU9Hy7/8PwwdtnarXqLagOX0b/TbZx2zLMqEg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -253,8 +247,6 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3v
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY=
-github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c h1:N7uWGS2fTwH/4BwxbHiJZNAFTSJ5yPU0emHsQWvkxEY=
-github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=
github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
@@ -297,8 +289,6 @@ github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784 h1:+DAetXqxv/
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/mna/pigeon v1.0.0 h1:n46IoStjdzjaXuyBH53j9HZ8CVqGWpC7P5/v8dP4qEY=
-github.com/mna/pigeon v1.0.0/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
@@ -307,7 +297,6 @@ github.com/moul/anonuuid v0.0.0-20160222162117-609b752a95ef h1:E/seV1Rtsnr2juBw1
github.com/moul/anonuuid v0.0.0-20160222162117-609b752a95ef/go.mod h1:LgKrp0Iss/BVwquptq4eIe6HPr0s3t1WHT5x0qOh14U=
github.com/moul/gotty-client v0.0.0-20180327180212-b26a57ebc215 h1:y6FZWUBBt1iPmJyGbGza3ncvVBMKzgd32oFChRZR7Do=
github.com/moul/gotty-client v0.0.0-20180327180212-b26a57ebc215/go.mod h1:CxM/JGtpRrEPve5H04IhxJrGhxgwxMc6jSP2T4YD60w=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
@@ -322,7 +311,6 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/oracle/oci-go-sdk v1.8.0 h1:4SO45bKV0I3/Mn1os3ANDZmV0eSE5z5CLdSUIkxtyzs=
github.com/oracle/oci-go-sdk v1.8.0/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a h1:A3QMuteviunoaY/8ex+RKFqwhcZJ/Cf3fCW3IwL2wx4=
@@ -342,16 +330,9 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible h1:ZoVHH6voxW9Onzo6z2yLtocVoN6mBocyDoqoyAMHokE=
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible/go.mod h1:T3/WrziK7fYH3C8ilAFAHe99R452/IzIG3YYkqaOFeQ=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17 h1:4qPms2txLWMLXKzqlnYSulKRS4cS9aYgPtAEpUelQok=
github.com/renstrom/fuzzysearch v0.0.0-20160331204855-2d205ac6ec17/go.mod h1:SAEjPB4voP88qmWJXI7mA5m15uNlEnuHLx4Eu2mPGpQ=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@@ -361,6 +342,8 @@ github.com/rwtodd/Go.Sed v0.0.0-20170507045331-d6d5d585814e/go.mod h1:8AEUvGVi2u
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
+github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70 h1:DaqC32ZwOuO4ctgg9qAdKnlQxwFPkKmCOEqwSNwYy7c=
@@ -406,6 +389,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/temoto/robotstxt v0.0.0-20180810133444-97ee4a9ee6ea h1:hH8P1IiDpzRU6ZDbDh/RDnVuezi2oOXJpApa06M0zyI=
+github.com/temoto/robotstxt v0.0.0-20180810133444-97ee4a9ee6ea/go.mod h1:aOux3gHPCftJ3KHq6Pz/AlDjYJ7Y+yKfm1gU/3B0u04=
github.com/tencentcloud/tencentcloud-sdk-go v0.0.0-20181220135002-f1744d40d346 h1:a014AaXz7AISMePv8xKRffUZZkr5z2XmSDf41gRV3+A=
github.com/tencentcloud/tencentcloud-sdk-go v0.0.0-20181220135002-f1744d40d346/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4=
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1 h1:U6ufy3mLDgg9RYupntOvAF7xCmNNquyKaYaaVHo1Nnk=
@@ -422,10 +407,6 @@ github.com/yandex-cloud/go-sdk v0.0.0-20190402114215-3fc1d6947035 h1:2ZLZeg6xp+k
github.com/yandex-cloud/go-sdk v0.0.0-20190402114215-3fc1d6947035/go.mod h1:Eml0jFLU4VVHgIN8zPHMuNwZXVzUMILyO6lQZSfz854=
go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
@@ -444,6 +425,7 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -455,7 +437,6 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsq
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
@@ -487,8 +468,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuq
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -512,14 +491,10 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190530184349-ce1a3806b557 h1:WFdP1eIY3AwGUPgVua5UIX4C7BzCIK8TOwm6RA+0vAQ=
-golang.org/x/tools v0.0.0-20190530184349-ce1a3806b557/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
-google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@@ -540,11 +515,8 @@ google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9M
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
-google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
diff --git a/vendor/github.com/PuerkitoBio/goquery/.gitattributes b/vendor/github.com/PuerkitoBio/goquery/.gitattributes
new file mode 100644
index 000000000..0cc26ec01
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.gitattributes
@@ -0,0 +1 @@
+testdata/* linguist-vendored
diff --git a/vendor/github.com/PuerkitoBio/goquery/.gitignore b/vendor/github.com/PuerkitoBio/goquery/.gitignore
new file mode 100644
index 000000000..970381cd2
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.gitignore
@@ -0,0 +1,16 @@
+# editor temporary files
+*.sublime-*
+.DS_Store
+*.swp
+#*.*#
+tags
+
+# direnv config
+.env*
+
+# test binaries
+*.test
+
+# coverage and profile outputs
+*.out
+
diff --git a/vendor/github.com/PuerkitoBio/goquery/.travis.yml b/vendor/github.com/PuerkitoBio/goquery/.travis.yml
new file mode 100644
index 000000000..cc1402d5c
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2.x
+ - 1.3.x
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - "1.10.x"
+ - 1.11.x
+ - tip
+
diff --git a/vendor/github.com/PuerkitoBio/goquery/LICENSE b/vendor/github.com/PuerkitoBio/goquery/LICENSE
new file mode 100644
index 000000000..f743d3728
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012-2016, Martin Angers & Contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/goquery/README.md b/vendor/github.com/PuerkitoBio/goquery/README.md
new file mode 100644
index 000000000..84f9af39e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/README.md
@@ -0,0 +1,179 @@
+# goquery - a little like that j-thing, only in Go
+[![build status](https://secure.travis-ci.org/PuerkitoBio/goquery.svg?branch=master)](http://travis-ci.org/PuerkitoBio/goquery) [![GoDoc](https://godoc.org/github.com/PuerkitoBio/goquery?status.png)](http://godoc.org/github.com/PuerkitoBio/goquery) [![Sourcegraph Badge](https://sourcegraph.com/github.com/PuerkitoBio/goquery/-/badge.svg)](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
+
+goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
+
+Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
+
+Syntax-wise, it is as close as possible to jQuery, with the same function names when possible, and that warm and fuzzy chainable interface. jQuery being the ultra-popular library that it is, I felt that writing a similar HTML-manipulating library was better to follow its API than to start anew (in the same spirit as Go's `fmt` package), even though some of its methods are less than intuitive (looking at you, [index()][index]...).
+
+## Table of Contents
+
+* [Installation](#installation)
+* [Changelog](#changelog)
+* [API](#api)
+* [Examples](#examples)
+* [Related Projects](#related-projects)
+* [Support](#support)
+* [License](#license)
+
+## Installation
+
+Please note that because of the net/html dependency, goquery requires Go1.1+.
+
+ $ go get github.com/PuerkitoBio/goquery
+
+(optional) To run unit tests:
+
+ $ cd $GOPATH/src/github.com/PuerkitoBio/goquery
+ $ go test
+
+(optional) To run benchmarks (warning: it runs for a few minutes):
+
+ $ cd $GOPATH/src/github.com/PuerkitoBio/goquery
+ $ go test -bench=".*"
+
+## Changelog
+
+**Note that goquery's API is now stable, and will not break.**
+
+* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
+* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
+* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
+* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
+* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
+* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
+* **2016-12-29 (v1.0.2)** : Optimize allocations for `Selection.Text` (thanks to @radovskyb).
+* **2016-08-28 (v1.0.1)** : Optimize performance for large documents.
+* **2016-07-27 (v1.0.0)** : Tag version 1.0.0.
+* **2016-06-15** : Invalid selector strings internally compile to a `Matcher` implementation that never matches any node (instead of a panic). So for example, `doc.Find("~")` returns an empty `*Selection` object.
+* **2016-02-02** : Add `NodeName` utility function similar to the DOM's `nodeName` property. It returns the tag name of the first element in a selection, and other relevant values of non-element nodes (see godoc for details). Add `OuterHtml` utility function similar to the DOM's `outerHTML` property (named `OuterHtml` in small caps for consistency with the existing `Html` method on the `Selection`).
+* **2015-04-20** : Add `AttrOr` helper method to return the attribute's value or a default value if absent. Thanks to [piotrkowalczuk][piotr].
+* **2015-02-04** : Add more manipulation functions - Prepend* - thanks again to [Andrew Stone][thatguystone].
+* **2014-11-28** : Add more manipulation functions - ReplaceWith*, Wrap* and Unwrap - thanks again to [Andrew Stone][thatguystone].
+* **2014-11-07** : Add manipulation functions (thanks to [Andrew Stone][thatguystone]) and `*Matcher` functions, that receive compiled cascadia selectors instead of selector strings, thus avoiding potential panics thrown by goquery via `cascadia.MustCompile` calls. This results in better performance (selectors can be compiled once and reused) and more idiomatic error handling (you can handle cascadia's compilation errors, instead of recovering from panics, which had been bugging me for a long time). Note that the actual type expected is a `Matcher` interface, that `cascadia.Selector` implements. Other matcher implementations could be used.
+* **2014-11-06** : Change import paths of net/html to golang.org/x/net/html (see https://groups.google.com/forum/#!topic/golang-nuts/eD8dh3T9yyA). Make sure to update your code to use the new import path too when you call goquery with `html.Node`s.
+* **v0.3.2** : Add `NewDocumentFromReader()` (thanks jweir) which allows creating a goquery document from an io.Reader.
+* **v0.3.1** : Add `NewDocumentFromResponse()` (thanks assassingj) which allows creating a goquery document from an http response.
+* **v0.3.0** : Add `EachWithBreak()` which allows to break out of an `Each()` loop by returning false. This function was added instead of changing the existing `Each()` to avoid breaking compatibility.
+* **v0.2.1** : Make go-getable, now that [go.net/html is Go1.0-compatible][gonet] (thanks to @matrixik for pointing this out).
+* **v0.2.0** : Add support for negative indices in Slice(). **BREAKING CHANGE** `Document.Root` is removed, `Document` is now a `Selection` itself (a selection of one, the root element, just like `Document.Root` was before). Add jQuery's Closest() method.
+* **v0.1.1** : Add benchmarks to use as baseline for refactorings, refactor Next...() and Prev...() methods to use the new html package's linked list features (Next/PrevSibling, FirstChild). Good performance boost (40+% in some cases).
+* **v0.1.0** : Initial release.
+
+## API
+
+goquery exposes two structs, `Document` and `Selection`, and the `Matcher` interface. Unlike jQuery, which is loaded as part of a DOM document, and thus acts on its containing document, goquery doesn't know which HTML document to act upon. So it needs to be told, and that's what the `Document` type is for. It holds the root document node as the initial Selection value to manipulate.
+
+jQuery often has many variants for the same function (no argument, a selector string argument, a jQuery object argument, a DOM element argument, ...). Instead of exposing the same features in goquery as a single method with variadic empty interface arguments, statically-typed signatures are used following this naming convention:
+
+* When the jQuery equivalent can be called with no argument, it has the same name as jQuery for the no argument signature (e.g.: `Prev()`), and the version with a selector string argument is called `XxxFiltered()` (e.g.: `PrevFiltered()`)
+* When the jQuery equivalent **requires** one argument, the same name as jQuery is used for the selector string version (e.g.: `Is()`)
+* The signatures accepting a jQuery object as argument are defined in goquery as `XxxSelection()` and take a `*Selection` object as argument (e.g.: `FilterSelection()`)
+* The signatures accepting a DOM element as argument in jQuery are defined in goquery as `XxxNodes()` and take a variadic argument of type `*html.Node` (e.g.: `FilterNodes()`)
+* The signatures accepting a function as argument in jQuery are defined in goquery as `XxxFunction()` and take a function as argument (e.g.: `FilterFunction()`)
+* The goquery methods that can be called with a selector string have a corresponding version that take a `Matcher` interface and are defined as `XxxMatcher()` (e.g.: `IsMatcher()`)
+
+Utility functions that are not in jQuery but are useful in Go are implemented as functions (that take a `*Selection` as parameter), to avoid a potential naming clash on the `*Selection`'s methods (reserved for jQuery-equivalent behaviour).
+
+The complete [godoc reference documentation can be found here][doc].
+
+Please note that Cascadia's selectors do not necessarily match all supported selectors of jQuery (Sizzle). See the [cascadia project][cascadia] for details. Invalid selector strings compile to a `Matcher` that fails to match any node. Behaviour of the various functions that take a selector string as argument follows from that fact, e.g. (where `~` is an invalid selector string):
+
+* `Find("~")` returns an empty selection because the selector string doesn't match anything.
+* `Add("~")` returns a new selection that holds the same nodes as the original selection, because it didn't add any node (selector string didn't match anything).
+* `ParentsFiltered("~")` returns an empty selection because the selector string doesn't match anything.
+* `ParentsUntil("~")` returns all parents of the selection because the selector string didn't match any element to stop before the top element.
+
+## Examples
+
+See some tips and tricks in the [wiki][].
+
+Adapted from example_test.go:
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/PuerkitoBio/goquery"
+)
+
+func ExampleScrape() {
+ // Request the HTML page.
+ res, err := http.Get("http://metalsucks.net")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
+ }
+
+ // Load the HTML document
+ doc, err := goquery.NewDocumentFromReader(res.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Find the review items
+ doc.Find(".sidebar-reviews article .content-block").Each(func(i int, s *goquery.Selection) {
+ // For each item found, get the band and title
+ band := s.Find("a").Text()
+ title := s.Find("i").Text()
+ fmt.Printf("Review %d: %s - %s\n", i, band, title)
+ })
+}
+
+func main() {
+ ExampleScrape()
+}
+```
+
+## Related Projects
+
+- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
+- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
+- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
+- [asciimoo/colly](https://github.com/asciimoo/colly), a lightning fast and elegant Scraping Framework
+- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
+- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
+
+## Support
+
+There are a number of ways you can support the project:
+
+* Use it, star it, build something with it, spread the word!
+ - If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
+* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
+ - Please search existing issues before opening a new one - it may have already been addressed.
+* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
+ - Make sure new code is tested.
+ - Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
+
+If you desperately want to send money my way, I have a BuyMeACoffee.com page:
+
+
+
+## License
+
+The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia's license is [here][caslic].
+
+[jquery]: http://jquery.com/
+[go]: http://golang.org/
+[cascadia]: https://github.com/andybalholm/cascadia
+[cascadiacli]: https://github.com/suntong/cascadia
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[golic]: http://golang.org/LICENSE
+[caslic]: https://github.com/andybalholm/cascadia/blob/master/LICENSE
+[doc]: http://godoc.org/github.com/PuerkitoBio/goquery
+[index]: http://api.jquery.com/index/
+[gonet]: https://github.com/golang/net/
+[html]: http://godoc.org/golang.org/x/net/html
+[wiki]: https://github.com/PuerkitoBio/goquery/wiki/Tips-and-tricks
+[thatguystone]: https://github.com/thatguystone
+[piotr]: https://github.com/piotrkowalczuk
+[goq]: https://github.com/andrewstuart/goq
diff --git a/vendor/github.com/PuerkitoBio/goquery/array.go b/vendor/github.com/PuerkitoBio/goquery/array.go
new file mode 100644
index 000000000..1b1f6cbe6
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/array.go
@@ -0,0 +1,124 @@
+package goquery
+
+import (
+ "golang.org/x/net/html"
+)
+
+const (
+ maxUint = ^uint(0)
+ maxInt = int(maxUint >> 1)
+
+ // ToEnd is a special index value that can be used as end index in a call
+ // to Slice so that all elements are selected until the end of the Selection.
+ // It is equivalent to passing (*Selection).Length().
+ ToEnd = maxInt
+)
+
+// First reduces the set of matched elements to the first in the set.
+// It returns a new Selection object, and an empty Selection object if the
+// the selection is empty.
+func (s *Selection) First() *Selection {
+ return s.Eq(0)
+}
+
+// Last reduces the set of matched elements to the last in the set.
+// It returns a new Selection object, and an empty Selection object if
+// the selection is empty.
+func (s *Selection) Last() *Selection {
+ return s.Eq(-1)
+}
+
+// Eq reduces the set of matched elements to the one at the specified index.
+// If a negative index is given, it counts backwards starting at the end of the
+// set. It returns a new Selection object, and an empty Selection object if the
+// index is invalid.
+func (s *Selection) Eq(index int) *Selection {
+ if index < 0 {
+ index += len(s.Nodes)
+ }
+
+ if index >= len(s.Nodes) || index < 0 {
+ return newEmptySelection(s.document)
+ }
+
+ return s.Slice(index, index+1)
+}
+
+// Slice reduces the set of matched elements to a subset specified by a range
+// of indices. The start index is 0-based and indicates the index of the first
+// element to select. The end index is 0-based and indicates the index at which
+// the elements stop being selected (the end index is not selected).
+//
+// The indices may be negative, in which case they represent an offset from the
+// end of the selection.
+//
+// The special value ToEnd may be specified as end index, in which case all elements
+// until the end are selected. This works both for a positive and negative start
+// index.
+func (s *Selection) Slice(start, end int) *Selection {
+ if start < 0 {
+ start += len(s.Nodes)
+ }
+ if end == ToEnd {
+ end = len(s.Nodes)
+ } else if end < 0 {
+ end += len(s.Nodes)
+ }
+ return pushStack(s, s.Nodes[start:end])
+}
+
+// Get retrieves the underlying node at the specified index.
+// Get without parameter is not implemented, since the node array is available
+// on the Selection object.
+func (s *Selection) Get(index int) *html.Node {
+ if index < 0 {
+ index += len(s.Nodes) // Negative index gets from the end
+ }
+ return s.Nodes[index]
+}
+
+// Index returns the position of the first element within the Selection object
+// relative to its sibling elements.
+func (s *Selection) Index() int {
+ if len(s.Nodes) > 0 {
+ return newSingleSelection(s.Nodes[0], s.document).PrevAll().Length()
+ }
+ return -1
+}
+
+// IndexSelector returns the position of the first element within the
+// Selection object relative to the elements matched by the selector, or -1 if
+// not found.
+func (s *Selection) IndexSelector(selector string) int {
+ if len(s.Nodes) > 0 {
+ sel := s.document.Find(selector)
+ return indexInSlice(sel.Nodes, s.Nodes[0])
+ }
+ return -1
+}
+
+// IndexMatcher returns the position of the first element within the
+// Selection object relative to the elements matched by the matcher, or -1 if
+// not found.
+func (s *Selection) IndexMatcher(m Matcher) int {
+ if len(s.Nodes) > 0 {
+ sel := s.document.FindMatcher(m)
+ return indexInSlice(sel.Nodes, s.Nodes[0])
+ }
+ return -1
+}
+
+// IndexOfNode returns the position of the specified node within the Selection
+// object, or -1 if not found.
+func (s *Selection) IndexOfNode(node *html.Node) int {
+ return indexInSlice(s.Nodes, node)
+}
+
+// IndexOfSelection returns the position of the first node in the specified
+// Selection object within this Selection object, or -1 if not found.
+func (s *Selection) IndexOfSelection(sel *Selection) int {
+ if sel != nil && len(sel.Nodes) > 0 {
+ return indexInSlice(s.Nodes, sel.Nodes[0])
+ }
+ return -1
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/doc.go b/vendor/github.com/PuerkitoBio/goquery/doc.go
new file mode 100644
index 000000000..71146a780
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/doc.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2012-2016, Martin Angers & Contributors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation and/or
+// other materials provided with the distribution.
+// * Neither the name of the author nor the names of its contributors may be used to
+// endorse or promote products derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
+// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package goquery implements features similar to jQuery, including the chainable
+syntax, to manipulate and query an HTML document.
+
+It brings a syntax and a set of features similar to jQuery to the Go language.
+It is based on Go's net/html package and the CSS Selector library cascadia.
+Since the net/html parser returns nodes, and not a full-featured DOM
+tree, jQuery's stateful manipulation functions (like height(), css(), detach())
+have been left off.
+
+Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is
+the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML.
+See the repository's wiki for various options on how to do this.
+
+Syntax-wise, it is as close as possible to jQuery, with the same method names when
+possible, and that warm and fuzzy chainable interface. jQuery being the
+ultra-popular library that it is, writing a similar HTML-manipulating
+library was better to follow its API than to start anew (in the same spirit as
+Go's fmt package), even though some of its methods are less than intuitive (looking
+at you, index()...).
+
+It is hosted on GitHub, along with additional documentation in the README.md
+file: https://github.com/puerkitobio/goquery
+
+Please note that because of the net/html dependency, goquery requires Go1.1+.
+
+The various methods are split into files based on the category of behavior.
+The three dots (...) indicate that various "overloads" are available.
+
+* array.go : array-like positional manipulation of the selection.
+ - Eq()
+ - First()
+ - Get()
+ - Index...()
+ - Last()
+ - Slice()
+
+* expand.go : methods that expand or augment the selection's set.
+ - Add...()
+ - AndSelf()
+ - Union(), which is an alias for AddSelection()
+
+* filter.go : filtering methods, that reduce the selection's set.
+ - End()
+ - Filter...()
+ - Has...()
+ - Intersection(), which is an alias of FilterSelection()
+ - Not...()
+
+* iteration.go : methods to loop over the selection's nodes.
+ - Each()
+ - EachWithBreak()
+ - Map()
+
+* manipulation.go : methods for modifying the document
+ - After...()
+ - Append...()
+ - Before...()
+ - Clone()
+ - Empty()
+ - Prepend...()
+ - Remove...()
+ - ReplaceWith...()
+ - Unwrap()
+ - Wrap...()
+ - WrapAll...()
+ - WrapInner...()
+
+* property.go : methods that inspect and get the node's properties values.
+ - Attr*(), RemoveAttr(), SetAttr()
+ - AddClass(), HasClass(), RemoveClass(), ToggleClass()
+ - Html()
+ - Length()
+ - Size(), which is an alias for Length()
+ - Text()
+
+* query.go : methods that query, or reflect, a node's identity.
+ - Contains()
+ - Is...()
+
+* traversal.go : methods to traverse the HTML document tree.
+ - Children...()
+ - Contents()
+ - Find...()
+ - Next...()
+ - Parent[s]...()
+ - Prev...()
+ - Siblings...()
+
+* type.go : definition of the types exposed by goquery.
+ - Document
+ - Selection
+ - Matcher
+
+* utilities.go : definition of helper functions (and not methods on a *Selection)
+that are not part of jQuery, but are useful to goquery.
+ - NodeName
+ - OuterHtml
+*/
+package goquery
diff --git a/vendor/github.com/PuerkitoBio/goquery/expand.go b/vendor/github.com/PuerkitoBio/goquery/expand.go
new file mode 100644
index 000000000..7caade531
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/expand.go
@@ -0,0 +1,70 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Add adds the selector string's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The selector string is run in the context of the document of the current
+// Selection object.
+func (s *Selection) Add(selector string) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
+}
+
+// AddMatcher adds the matcher's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The matcher is run in the context of the document of the current
+// Selection object.
+func (s *Selection) AddMatcher(m Matcher) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
+}
+
+// AddSelection adds the specified Selection object's nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.AddNodes()
+ }
+ return s.AddNodes(sel.Nodes...)
+}
+
+// Union is an alias for AddSelection.
+func (s *Selection) Union(sel *Selection) *Selection {
+ return s.AddSelection(sel)
+}
+
+// AddNodes adds the specified nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
+}
+
+// AndSelf adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+// Deprecated: This function has been deprecated and is now an alias for AddBack().
+func (s *Selection) AndSelf() *Selection {
+ return s.AddBack()
+}
+
+// AddBack adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+func (s *Selection) AddBack() *Selection {
+ return s.AddSelection(s.prevSel)
+}
+
+// AddBackFiltered reduces the previous set of elements on the stack to those that
+// match the selector string, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackFiltered(selector string) *Selection {
+ return s.AddSelection(s.prevSel.Filter(selector))
+}
+
+// AddBackMatcher reduces the previous set of elements on the stack to those that match
+// the matcher, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackMatcher(m Matcher) *Selection {
+ return s.AddSelection(s.prevSel.FilterMatcher(m))
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/filter.go b/vendor/github.com/PuerkitoBio/goquery/filter.go
new file mode 100644
index 000000000..9138ffb33
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/filter.go
@@ -0,0 +1,163 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Filter reduces the set of matched elements to those that match the selector string.
+// It returns a new Selection object for this subset of matching elements.
+func (s *Selection) Filter(selector string) *Selection {
+ return s.FilterMatcher(compileMatcher(selector))
+}
+
+// FilterMatcher reduces the set of matched elements to those that match
+// the given matcher. It returns a new Selection object for this subset
+// of matching elements.
+func (s *Selection) FilterMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, true))
+}
+
+// Not removes elements from the Selection that match the selector string.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) Not(selector string) *Selection {
+ return s.NotMatcher(compileMatcher(selector))
+}
+
+// NotMatcher removes elements from the Selection that match the given matcher.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, false))
+}
+
+// FilterFunction reduces the set of matched elements to those that pass the function's test.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, true))
+}
+
+// NotFunction removes elements from the Selection that pass the function's test.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, false))
+}
+
+// FilterNodes reduces the set of matched elements to those that match the specified nodes.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, true))
+}
+
+// NotNodes removes elements from the Selection that match the specified nodes.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, false))
+}
+
+// FilterSelection reduces the set of matched elements to those that match a
+// node in the specified Selection object.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, true))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, true))
+}
+
+// NotSelection removes elements from the Selection that match a node in the specified
+// Selection object. It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, false))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, false))
+}
+
+// Intersection is an alias for FilterSelection.
+func (s *Selection) Intersection(sel *Selection) *Selection {
+ return s.FilterSelection(sel)
+}
+
+// Has reduces the set of matched elements to those that have a descendant
+// that matches the selector.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) Has(selector string) *Selection {
+ return s.HasSelection(s.document.Find(selector))
+}
+
+// HasMatcher reduces the set of matched elements to those that have a descendant
+// that matches the matcher.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasMatcher(m Matcher) *Selection {
+ return s.HasSelection(s.document.FindMatcher(m))
+}
+
+// HasNodes reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
+ return s.FilterFunction(func(_ int, sel *Selection) bool {
+ // Add all nodes that contain one of the specified nodes
+ for _, n := range nodes {
+ if sel.Contains(n) {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// HasSelection reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes of the specified Selection object.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.HasNodes()
+ }
+ return s.HasNodes(sel.Nodes...)
+}
+
+// End ends the most recent filtering operation in the current chain and
+// returns the set of matched elements to its previous state.
+func (s *Selection) End() *Selection {
+ if s.prevSel != nil {
+ return s.prevSel
+ }
+ return newEmptySelection(s.document)
+}
+
+// Filter based on the matcher, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
+ // Optimize if keep is requested
+ if keep {
+ return m.Filter(sel.Nodes)
+ }
+ // Use grep
+ return grep(sel, func(i int, s *Selection) bool {
+ return !m.Match(s.Get(0))
+ })
+}
+
+// Filter based on an array of nodes, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
+ if len(nodes)+len(sel.Nodes) < minNodesForSet {
+ return grep(sel, func(i int, s *Selection) bool {
+ return isInSlice(nodes, s.Get(0)) == keep
+ })
+ }
+
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return grep(sel, func(i int, s *Selection) bool {
+ return set[s.Get(0)] == keep
+ })
+}
+
+// Filter based on a function test, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
+ return grep(sel, func(i int, s *Selection) bool {
+ return f(i, s) == keep
+ })
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.mod b/vendor/github.com/PuerkitoBio/goquery/go.mod
new file mode 100644
index 000000000..2fa1332a5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.mod
@@ -0,0 +1,6 @@
+module github.com/PuerkitoBio/goquery
+
+require (
+ github.com/andybalholm/cascadia v1.0.0
+ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
+)
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.sum b/vendor/github.com/PuerkitoBio/goquery/go.sum
new file mode 100644
index 000000000..11c575754
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.sum
@@ -0,0 +1,5 @@
+github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/vendor/github.com/PuerkitoBio/goquery/iteration.go b/vendor/github.com/PuerkitoBio/goquery/iteration.go
new file mode 100644
index 000000000..e246f2e0e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/iteration.go
@@ -0,0 +1,39 @@
+package goquery
+
+// Each iterates over a Selection object, executing a function for each
+// matched element. It returns the current Selection object. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Each(f func(int, *Selection)) *Selection {
+ for i, n := range s.Nodes {
+ f(i, newSingleSelection(n, s.document))
+ }
+ return s
+}
+
+// EachWithBreak iterates over a Selection object, executing a function for each
+// matched element. It is identical to Each except that it is possible to break
+// out of the loop by returning false in the callback function. It returns the
+// current Selection object.
+func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
+ for i, n := range s.Nodes {
+ if !f(i, newSingleSelection(n, s.document)) {
+ return s
+ }
+ }
+ return s
+}
+
+// Map passes each element in the current matched set through a function,
+// producing a slice of string holding the returned values. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
+ for i, n := range s.Nodes {
+ result = append(result, f(i, newSingleSelection(n, s.document)))
+ }
+
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation.go b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
new file mode 100644
index 000000000..34eb7570f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
@@ -0,0 +1,574 @@
+package goquery
+
+import (
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// After applies the selector from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) After(selector string) *Selection {
+ return s.AfterMatcher(compileMatcher(selector))
+}
+
+// AfterMatcher applies the matcher from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterMatcher(m Matcher) *Selection {
+ return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AfterSelection inserts the elements in the selection after each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterSelection(sel *Selection) *Selection {
+ return s.AfterNodes(sel.Nodes...)
+}
+
+// AfterHtml parses the html and inserts it after the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterHtml(html string) *Selection {
+ return s.AfterNodes(parseHtml(html)...)
+}
+
+// AfterNodes inserts the nodes after each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn.NextSibling)
+ }
+ })
+}
+
+// Append appends the elements specified by the selector to the end of each element
+// in the set of matched elements, following those rules:
+//
+// 1) The selector is applied to the root document.
+//
+// 2) Elements that are part of the document will be moved to the new location.
+//
+// 3) If there are multiple locations to append to, cloned nodes will be
+// appended to all target locations except the last one, which will be moved
+// as noted in (2).
+func (s *Selection) Append(selector string) *Selection {
+ return s.AppendMatcher(compileMatcher(selector))
+}
+
+// AppendMatcher appends the elements specified by the matcher to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendMatcher(m Matcher) *Selection {
+ return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AppendSelection appends the elements in the selection to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendSelection(sel *Selection) *Selection {
+ return s.AppendNodes(sel.Nodes...)
+}
+
+// AppendHtml parses the html and appends it to the set of matched elements.
+func (s *Selection) AppendHtml(html string) *Selection {
+ return s.AppendNodes(parseHtml(html)...)
+}
+
+// AppendNodes appends the specified nodes to each node in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ sn.AppendChild(n)
+ })
+}
+
+// Before inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) Before(selector string) *Selection {
+ return s.BeforeMatcher(compileMatcher(selector))
+}
+
+// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeMatcher(m Matcher) *Selection {
+ return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// BeforeSelection inserts the elements in the selection before each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeSelection(sel *Selection) *Selection {
+ return s.BeforeNodes(sel.Nodes...)
+}
+
+// BeforeHtml parses the html and inserts it before the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeHtml(html string) *Selection {
+ return s.BeforeNodes(parseHtml(html)...)
+}
+
+// BeforeNodes inserts the nodes before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn)
+ }
+ })
+}
+
+// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
+// attached to the document.
+func (s *Selection) Clone() *Selection {
+ ns := newEmptySelection(s.document)
+ ns.Nodes = cloneNodes(s.Nodes)
+ return ns
+}
+
+// Empty removes all children nodes from the set of matched elements.
+// It returns the children nodes in a new Selection.
+func (s *Selection) Empty() *Selection {
+ var nodes []*html.Node
+
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ nodes = append(nodes, c)
+ }
+ }
+
+ return pushStack(s, nodes)
+}
+
+// Prepend prepends the elements specified by the selector to each element in
+// the set of matched elements, following the same rules as Append.
+func (s *Selection) Prepend(selector string) *Selection {
+ return s.PrependMatcher(compileMatcher(selector))
+}
+
+// PrependMatcher prepends the elements specified by the matcher to each
+// element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependMatcher(m Matcher) *Selection {
+ return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// PrependSelection prepends the elements in the selection to each element in
+// the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependSelection(sel *Selection) *Selection {
+ return s.PrependNodes(sel.Nodes...)
+}
+
+// PrependHtml parses the html and prepends it to the set of matched elements.
+func (s *Selection) PrependHtml(html string) *Selection {
+ return s.PrependNodes(parseHtml(html)...)
+}
+
+// PrependNodes prepends the specified nodes to each node in the set of
+// matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ // sn.FirstChild may be nil, in which case this functions like
+ // sn.AppendChild()
+ sn.InsertBefore(n, sn.FirstChild)
+ })
+}
+
+// Remove removes the set of matched elements from the document.
+// It returns the same selection, now consisting of nodes not in the document.
+func (s *Selection) Remove() *Selection {
+ for _, n := range s.Nodes {
+ if n.Parent != nil {
+ n.Parent.RemoveChild(n)
+ }
+ }
+
+ return s
+}
+
+// RemoveFiltered removes the set of matched elements by selector.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveFiltered(selector string) *Selection {
+ return s.RemoveMatcher(compileMatcher(selector))
+}
+
+// RemoveMatcher removes the set of matched elements.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveMatcher(m Matcher) *Selection {
+ return s.FilterMatcher(m).Remove()
+}
+
+// ReplaceWith replaces each element in the set of matched elements with the
+// nodes matched by the given selector.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWith(selector string) *Selection {
+ return s.ReplaceWithMatcher(compileMatcher(selector))
+}
+
+// ReplaceWithMatcher replaces each element in the set of matched elements with
+// the nodes matched by the given Matcher.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
+ return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// ReplaceWithSelection replaces each element in the set of matched elements with
+// the nodes from the given Selection.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
+ return s.ReplaceWithNodes(sel.Nodes...)
+}
+
+// ReplaceWithHtml replaces each element in the set of matched elements with
+// the parsed HTML.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithHtml(html string) *Selection {
+ return s.ReplaceWithNodes(parseHtml(html)...)
+}
+
+// ReplaceWithNodes replaces each element in the set of matched elements with
+// the given nodes.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
+ s.AfterNodes(ns...)
+ return s.Remove()
+}
+
+// SetHtml sets the html content of each element in the selection to
+// specified html string.
+func (s *Selection) SetHtml(html string) *Selection {
+ return setHtmlNodes(s, parseHtml(html)...)
+}
+
+// SetText sets the content of each element in the selection to specified content.
+// The provided text string is escaped.
+func (s *Selection) SetText(text string) *Selection {
+ return s.SetHtml(html.EscapeString(text))
+}
+
+// Unwrap removes the parents of the set of matched elements, leaving the matched
+// elements (and their siblings, if any) in their place.
+// It returns the original selection.
+func (s *Selection) Unwrap() *Selection {
+ s.Parent().Each(func(i int, ss *Selection) {
+		// For some reason, jquery allows unwrap to remove the <head> element, so
+		// allowing it here too. Same for <html>. Why it allows those elements to
+		// be unwrapped while not allowing body is a mystery to me.
+ if ss.Nodes[0].Data != "body" {
+ ss.ReplaceWithSelection(ss.Contents())
+ }
+ })
+
+ return s
+}
+
+// Wrap wraps each element in the set of matched elements inside the first
+// element matched by the given selector. The matched child is cloned before
+// being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) Wrap(selector string) *Selection {
+ return s.WrapMatcher(compileMatcher(selector))
+}
+
+// WrapMatcher wraps each element in the set of matched elements inside the
+// first element matched by the given matcher. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapMatcher(m Matcher) *Selection {
+ return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapSelection wraps each element in the set of matched elements inside the
+// first element in the given Selection. The element is cloned before being
+// inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapSelection(sel *Selection) *Selection {
+ return s.wrapNodes(sel.Nodes...)
+}
+
+// WrapHtml wraps each element in the set of matched elements inside the inner-
+// most child of the given HTML.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapHtml(html string) *Selection {
+ return s.wrapNodes(parseHtml(html)...)
+}
+
+// WrapNode wraps each element in the set of matched elements inside the inner-
+// most child of the given node. The given node is copied before being inserted
+// into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapNode(n *html.Node) *Selection {
+ return s.wrapNodes(n)
+}
+
+func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
+ s.Each(func(i int, ss *Selection) {
+ ss.wrapAllNodes(ns...)
+ })
+
+ return s
+}
+
+// WrapAll wraps a single HTML structure, matched by the given selector, around
+// all elements in the set of matched elements. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAll(selector string) *Selection {
+ return s.WrapAllMatcher(compileMatcher(selector))
+}
+
+// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
+// around all elements in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
+ return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapAllSelection wraps a single HTML structure, the first node of the given
+// Selection, around all elements in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
+ return s.wrapAllNodes(sel.Nodes...)
+}
+
+// WrapAllHtml wraps the given HTML structure around all elements in the set of
+// matched elements. The matched child is cloned before being inserted into the
+// document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllHtml(html string) *Selection {
+ return s.wrapAllNodes(parseHtml(html)...)
+}
+
+func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
+ if len(ns) > 0 {
+ return s.WrapAllNode(ns[0])
+ }
+ return s
+}
+
+// WrapAllNode wraps the given node around the first element in the Selection,
+// making all other nodes in the Selection children of the given node. The node
+// is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllNode(n *html.Node) *Selection {
+ if s.Size() == 0 {
+ return s
+ }
+
+ wrap := cloneNode(n)
+
+ first := s.Nodes[0]
+ if first.Parent != nil {
+ first.Parent.InsertBefore(wrap, first)
+ first.Parent.RemoveChild(first)
+ }
+
+ for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
+ wrap = c
+ }
+
+ newSingleSelection(wrap, s.document).AppendSelection(s)
+
+ return s
+}
+
+// WrapInner wraps an HTML structure, matched by the given selector, around the
+// content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInner(selector string) *Selection {
+ return s.WrapInnerMatcher(compileMatcher(selector))
+}
+
+// WrapInnerMatcher wraps an HTML structure, matched by the given selector,
+// around the content of element in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
+ return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapInnerSelection wraps an HTML structure, matched by the given selector,
+// around the content of element in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
+ return s.wrapInnerNodes(sel.Nodes...)
+}
+
+// WrapInnerHtml wraps an HTML structure, matched by the given selector, around
+// the content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerHtml(html string) *Selection {
+ return s.wrapInnerNodes(parseHtml(html)...)
+}
+
+// WrapInnerNode wraps an HTML structure, matched by the given selector, around
+// the content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
+ return s.wrapInnerNodes(n)
+}
+
+func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
+ if len(ns) == 0 {
+ return s
+ }
+
+ s.Each(func(i int, s *Selection) {
+ contents := s.Contents()
+
+ if contents.Size() > 0 {
+ contents.wrapAllNodes(ns...)
+ } else {
+ s.AppendNodes(cloneNode(ns[0]))
+ }
+ })
+
+ return s
+}
+
+func parseHtml(h string) []*html.Node {
+ // Errors are only returned when the io.Reader returns any error besides
+ // EOF, but strings.Reader never will
+ nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
+ if err != nil {
+ panic("goquery: failed to parse HTML: " + err.Error())
+ }
+ return nodes
+}
+
+func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ }
+ for _, c := range ns {
+ n.AppendChild(cloneNode(c))
+ }
+ }
+ return s
+}
+
+// Get the first child that is an ElementNode
+func getFirstChildEl(n *html.Node) *html.Node {
+ c := n.FirstChild
+ for c != nil && c.Type != html.ElementNode {
+ c = c.NextSibling
+ }
+ return c
+}
+
+// Deep copy a slice of nodes.
+func cloneNodes(ns []*html.Node) []*html.Node {
+ cns := make([]*html.Node, 0, len(ns))
+
+ for _, n := range ns {
+ cns = append(cns, cloneNode(n))
+ }
+
+ return cns
+}
+
+// Deep copy a node. The new node has clones of all the original node's
+// children but none of its parents or siblings.
+func cloneNode(n *html.Node) *html.Node {
+ nn := &html.Node{
+ Type: n.Type,
+ DataAtom: n.DataAtom,
+ Data: n.Data,
+ Attr: make([]html.Attribute, len(n.Attr)),
+ }
+
+ copy(nn.Attr, n.Attr)
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ nn.AppendChild(cloneNode(c))
+ }
+
+ return nn
+}
+
+func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
+ f func(sn *html.Node, n *html.Node)) *Selection {
+
+ lasti := s.Size() - 1
+
+ // net.Html doesn't provide document fragments for insertion, so to get
+ // things in the correct order with After() and Prepend(), the callback
+ // needs to be called on the reverse of the nodes.
+ if reverse {
+ for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
+ ns[i], ns[j] = ns[j], ns[i]
+ }
+ }
+
+ for i, sn := range s.Nodes {
+ for _, n := range ns {
+ if i != lasti {
+ f(sn, cloneNode(n))
+ } else {
+ if n.Parent != nil {
+ n.Parent.RemoveChild(n)
+ }
+ f(sn, n)
+ }
+ }
+ }
+
+ return s
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/property.go b/vendor/github.com/PuerkitoBio/goquery/property.go
new file mode 100644
index 000000000..411126db2
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/property.go
@@ -0,0 +1,275 @@
+package goquery
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+var rxClassTrim = regexp.MustCompile("[\t\r\n]")
+
+// Attr gets the specified attribute's value for the first element in the
+// Selection. To get the value for each element individually, use a looping
+// construct such as Each or Map method.
+func (s *Selection) Attr(attrName string) (val string, exists bool) {
+ if len(s.Nodes) == 0 {
+ return
+ }
+ return getAttributeValue(attrName, s.Nodes[0])
+}
+
+// AttrOr works like Attr but returns default value if attribute is not present.
+func (s *Selection) AttrOr(attrName, defaultValue string) string {
+ if len(s.Nodes) == 0 {
+ return defaultValue
+ }
+
+ val, exists := getAttributeValue(attrName, s.Nodes[0])
+ if !exists {
+ return defaultValue
+ }
+
+ return val
+}
+
+// RemoveAttr removes the named attribute from each element in the set of matched elements.
+func (s *Selection) RemoveAttr(attrName string) *Selection {
+ for _, n := range s.Nodes {
+ removeAttr(n, attrName)
+ }
+
+ return s
+}
+
+// SetAttr sets the given attribute on each element in the set of matched elements.
+func (s *Selection) SetAttr(attrName, val string) *Selection {
+ for _, n := range s.Nodes {
+ attr := getAttributePtr(attrName, n)
+ if attr == nil {
+ n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
+ } else {
+ attr.Val = val
+ }
+ }
+
+ return s
+}
+
+// Text gets the combined text contents of each element in the set of matched
+// elements, including their descendants.
+func (s *Selection) Text() string {
+ var buf bytes.Buffer
+
+ // Slightly optimized vs calling Each: no single selection object created
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.TextNode {
+ // Keep newlines and spaces, like jQuery
+ buf.WriteString(n.Data)
+ }
+ if n.FirstChild != nil {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ }
+ for _, n := range s.Nodes {
+ f(n)
+ }
+
+ return buf.String()
+}
+
+// Size is an alias for Length.
+func (s *Selection) Size() int {
+ return s.Length()
+}
+
+// Length returns the number of elements in the Selection object.
+func (s *Selection) Length() int {
+ return len(s.Nodes)
+}
+
+// Html gets the HTML contents of the first element in the set of matched
+// elements. It includes text and comment nodes.
+func (s *Selection) Html() (ret string, e error) {
+ // Since there is no .innerHtml, the HTML content must be re-created from
+ // the nodes using html.Render.
+ var buf bytes.Buffer
+
+ if len(s.Nodes) > 0 {
+ for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
+ e = html.Render(&buf, c)
+ if e != nil {
+ return
+ }
+ }
+ ret = buf.String()
+ }
+
+ return
+}
+
+// AddClass adds the given class(es) to each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+func (s *Selection) AddClass(class ...string) *Selection {
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+
+ if classStr == "" {
+ return s
+ }
+
+ tcls := getClassesSlice(classStr)
+ for _, n := range s.Nodes {
+ curClasses, attr := getClassesAndAttr(n, true)
+ for _, newClass := range tcls {
+ if !strings.Contains(curClasses, " "+newClass+" ") {
+ curClasses += newClass + " "
+ }
+ }
+
+ setClasses(n, attr, curClasses)
+ }
+
+ return s
+}
+
+// HasClass determines whether any of the matched elements are assigned the
+// given class.
+func (s *Selection) HasClass(class string) bool {
+ class = " " + class + " "
+ for _, n := range s.Nodes {
+ classes, _ := getClassesAndAttr(n, false)
+ if strings.Contains(classes, class) {
+ return true
+ }
+ }
+ return false
+}
+
+// RemoveClass removes the given class(es) from each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+// If no class name is provided, all classes are removed.
+func (s *Selection) RemoveClass(class ...string) *Selection {
+ var rclasses []string
+
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+ remove := classStr == ""
+
+ if !remove {
+ rclasses = getClassesSlice(classStr)
+ }
+
+ for _, n := range s.Nodes {
+ if remove {
+ removeAttr(n, "class")
+ } else {
+ classes, attr := getClassesAndAttr(n, true)
+ for _, rcl := range rclasses {
+ classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
+ }
+
+ setClasses(n, attr, classes)
+ }
+ }
+
+ return s
+}
+
+// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+func (s *Selection) ToggleClass(class ...string) *Selection {
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+
+ if classStr == "" {
+ return s
+ }
+
+ tcls := getClassesSlice(classStr)
+
+ for _, n := range s.Nodes {
+ classes, attr := getClassesAndAttr(n, true)
+ for _, tcl := range tcls {
+ if strings.Contains(classes, " "+tcl+" ") {
+ classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
+ } else {
+ classes += tcl + " "
+ }
+ }
+
+ setClasses(n, attr, classes)
+ }
+
+ return s
+}
+
+func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
+ if n == nil {
+ return nil
+ }
+
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ return &n.Attr[i]
+ }
+ }
+ return nil
+}
+
+// Private function to get the specified attribute's value from a node.
+func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
+ if a := getAttributePtr(attrName, n); a != nil {
+ val = a.Val
+ exists = true
+ }
+ return
+}
+
+// Get and normalize the "class" attribute from the node.
+func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
+ // Applies only to element nodes
+ if n.Type == html.ElementNode {
+ attr = getAttributePtr("class", n)
+ if attr == nil && create {
+ n.Attr = append(n.Attr, html.Attribute{
+ Key: "class",
+ Val: "",
+ })
+ attr = &n.Attr[len(n.Attr)-1]
+ }
+ }
+
+ if attr == nil {
+ classes = " "
+ } else {
+ classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
+ }
+
+ return
+}
+
+func getClassesSlice(classes string) []string {
+ return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
+}
+
+func removeAttr(n *html.Node, attrName string) {
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
+ n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
+ return
+ }
+ }
+}
+
+func setClasses(n *html.Node, attr *html.Attribute, classes string) {
+ classes = strings.TrimSpace(classes)
+ if classes == "" {
+ removeAttr(n, "class")
+ return
+ }
+
+ attr.Val = classes
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/query.go b/vendor/github.com/PuerkitoBio/goquery/query.go
new file mode 100644
index 000000000..fe86bf0bf
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/query.go
@@ -0,0 +1,49 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Is checks the current matched set of elements against a selector and
+// returns true if at least one of these elements matches.
+func (s *Selection) Is(selector string) bool {
+ return s.IsMatcher(compileMatcher(selector))
+}
+
+// IsMatcher checks the current matched set of elements against a matcher and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsMatcher(m Matcher) bool {
+ if len(s.Nodes) > 0 {
+ if len(s.Nodes) == 1 {
+ return m.Match(s.Nodes[0])
+ }
+ return len(m.Filter(s.Nodes)) > 0
+ }
+
+ return false
+}
+
+// IsFunction checks the current matched set of elements against a predicate and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
+ return s.FilterFunction(f).Length() > 0
+}
+
+// IsSelection checks the current matched set of elements against a Selection object
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsSelection(sel *Selection) bool {
+ return s.FilterSelection(sel).Length() > 0
+}
+
+// IsNodes checks the current matched set of elements against the specified nodes
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsNodes(nodes ...*html.Node) bool {
+ return s.FilterNodes(nodes...).Length() > 0
+}
+
+// Contains returns true if the specified Node is within,
+// at any depth, one of the nodes in the Selection object.
+// It is NOT inclusive, to behave like jQuery's implementation, and
+// unlike Javascript's .contains, so if the contained
+// node is itself in the selection, it returns false.
+func (s *Selection) Contains(n *html.Node) bool {
+ return sliceContains(s.Nodes, n)
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/traversal.go b/vendor/github.com/PuerkitoBio/goquery/traversal.go
new file mode 100644
index 000000000..5fa5315ac
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/traversal.go
@@ -0,0 +1,698 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+type siblingType int
+
+// Sibling type, used internally when iterating over children at the same
+// level (siblings) to specify which nodes are requested.
+const (
+ siblingPrevUntil siblingType = iota - 3
+ siblingPrevAll
+ siblingPrev
+ siblingAll
+ siblingNext
+ siblingNextAll
+ siblingNextUntil
+ siblingAllIncludingNonElements
+)
+
+// Find gets the descendants of each element in the current set of matched
+// elements, filtered by a selector. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) Find(selector string) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
+}
+
+// FindMatcher gets the descendants of each element in the current set of matched
+// elements, filtered by the matcher. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindMatcher(m Matcher) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, m))
+}
+
+// FindSelection gets the descendants of each element in the current
+// Selection, filtered by a Selection. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.FindNodes(sel.Nodes...)
+}
+
+// FindNodes gets the descendants of each element in the current
+// Selection, filtered by some nodes. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ if sliceContains(s.Nodes, n) {
+ return []*html.Node{n}
+ }
+ return nil
+ }))
+}
+
+// Contents gets the children of each element in the Selection,
+// including text and comment nodes. It returns a new Selection object
+// containing these elements.
+func (s *Selection) Contents() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
+}
+
+// ContentsFiltered gets the children of each element in the Selection,
+// filtered by the specified selector. It returns a new Selection
+// object containing these elements. Since selectors only act on Element nodes,
+// this function is an alias to ChildrenFiltered unless the selector is empty,
+// in which case it is an alias to Contents.
+func (s *Selection) ContentsFiltered(selector string) *Selection {
+ if selector != "" {
+ return s.ChildrenFiltered(selector)
+ }
+ return s.Contents()
+}
+
+// ContentsMatcher gets the children of each element in the Selection,
+// filtered by the specified matcher. It returns a new Selection
+// object containing these elements. Since matchers only act on Element nodes,
+// this function is an alias to ChildrenMatcher.
+func (s *Selection) ContentsMatcher(m Matcher) *Selection {
+ return s.ChildrenMatcher(m)
+}
+
+// Children gets the child elements of each element in the Selection.
+// It returns a new Selection object containing these elements.
+func (s *Selection) Children() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
+}
+
+// ChildrenFiltered gets the child elements of each element in the Selection,
+// filtered by the specified selector. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenFiltered(selector string) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
+}
+
+// ChildrenMatcher gets the child elements of each element in the Selection,
+// filtered by the specified matcher. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
+}
+
+// Parent gets the parent of each element in the Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) Parent() *Selection {
+ return pushStack(s, getParentNodes(s.Nodes))
+}
+
+// ParentFiltered gets the parent of each element in the Selection filtered by a
+// selector. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
+}
+
+// ParentMatcher gets the parent of each element in the Selection filtered by a
+// matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), m)
+}
+
+// Closest gets the first element that matches the selector by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) Closest(selector string) *Selection {
+ cs := compileMatcher(selector)
+ return s.ClosestMatcher(cs)
+}
+
+// ClosestMatcher gets the first element that matches the matcher by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestMatcher(m Matcher) *Selection {
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if m.Match(n) {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestNodes gets the first element that matches one of the nodes by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if set[n] {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestSelection gets the first element that matches one of the nodes in the
+// Selection by testing the element itself and traversing up through its ancestors
+// in the DOM tree.
+func (s *Selection) ClosestSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.ClosestNodes(sel.Nodes...)
+}
+
+// Parents gets the ancestors of each element in the current Selection. It
+// returns a new Selection object with the matched elements.
+func (s *Selection) Parents() *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
+}
+
+// ParentsFiltered gets the ancestors of each element in the current
+// Selection. It returns a new Selection object with the matched elements.
+func (s *Selection) ParentsFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
+}
+
+// ParentsMatcher gets the ancestors of each element in the current
+// Selection. It returns a new Selection object with the matched elements.
+func (s *Selection) ParentsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
+}
+
+// ParentsUntil gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntil(selector string) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
+}
+
+// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, m, nil))
+}
+
+// ParentsUntilSelection gets the ancestors of each element in the Selection,
+// up to but not including the elements in the specified Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.Parents()
+ }
+ return s.ParentsUntilNodes(sel.Nodes...)
+}
+
+// ParentsUntilNodes gets the ancestors of each element in the Selection,
+// up to but not including the specified nodes. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
+}
+
+// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
+// results based on a selector string. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
+// results based on a matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
+}
+
+// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.ParentsMatcher(filter)
+ }
+ return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
+}
+
+// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
+}
+
+// Siblings gets the siblings of each element in the Selection. It returns
+// a new Selection object containing the matched elements.
+func (s *Selection) Siblings() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
+}
+
+// SiblingsFiltered gets the siblings of each element in the Selection
+// filtered by a selector. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
+}
+
+// SiblingsMatcher gets the siblings of each element in the Selection
+// filtered by a matcher. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
+}
+
+// Next gets the immediately following sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) Next() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
+}
+
+// NextFiltered gets the immediately following sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
+}
+
+// NextMatcher gets the immediately following sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
+}
+
+// NextAll gets all the following siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) NextAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
+}
+
+// NextAllFiltered gets all the following siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
+}
+
+// NextAllMatcher gets all the following siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
+}
+
+// Prev gets the immediately preceding sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) Prev() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
+}
+
+// PrevFiltered gets the immediately preceding sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
+}
+
+// PrevMatcher gets the immediately preceding sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
+}
+
+// PrevAll gets all the preceding siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
+}
+
+// PrevAllFiltered gets all the preceding siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
+}
+
+// PrevAllMatcher gets all the preceding siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
+}
+
+// NextUntil gets all following siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(selector), nil))
+}
+
+// NextUntilMatcher gets all following siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ m, nil))
+}
+
+// NextUntilSelection gets all following siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.NextAll()
+ }
+ return s.NextUntilNodes(sel.Nodes...)
+}
+
+// NextUntilNodes gets all following siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes))
+}
+
+// PrevUntil gets all preceding siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(selector), nil))
+}
+
+// PrevUntilMatcher gets all preceding siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ m, nil))
+}
+
+// PrevUntilSelection gets all preceding siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.PrevAll()
+ }
+ return s.PrevUntilNodes(sel.Nodes...)
+}
+
+// PrevUntilNodes gets all preceding siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes))
+}
+
+// NextFilteredUntil is like NextUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ until, nil), filter)
+}
+
+// NextFilteredUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// NextMatcherUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.NextMatcher(filter)
+ }
+ return s.NextMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// NextFilteredUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// NextMatcherUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), filter)
+}
+
+// PrevFilteredUntil is like PrevUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ until, nil), filter)
+}
+
+// PrevFilteredUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// PrevMatcherUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.PrevMatcher(filter)
+ }
+ return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// PrevFilteredUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// PrevMatcherUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), filter)
+}
+
+// Filter and push filters the nodes based on a matcher, and pushes the results
+// on the stack, with the srcSel as previous selection.
+func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
+ // Create a temporary Selection with the specified nodes to filter using winnow
+ sel := &Selection{nodes, srcSel.document, nil}
+ // Filter based on matcher and push on stack
+ return pushStack(srcSel, winnow(sel, m, true))
+}
+
+// Internal implementation of Find that return raw nodes.
+func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
+ // Map nodes to find the matches within the children of each node
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ // Go down one level, becausejQuery's Find selects only within descendants
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == html.ElementNode {
+ result = append(result, m.MatchAll(c)...)
+ }
+ }
+ return
+ })
+}
+
+// Internal implementation to get all parent nodes, stopping at the specified
+// node (or nil if no stop).
+func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ for p := n.Parent; p != nil; p = p.Parent {
+ sel := newSingleSelection(p, nil)
+ if stopm != nil {
+ if sel.IsMatcher(stopm) {
+ break
+ }
+ } else if len(stopNodes) > 0 {
+ if sel.IsNodes(stopNodes...) {
+ break
+ }
+ }
+ if p.Type == html.ElementNode {
+ result = append(result, p)
+ }
+ }
+ return
+ })
+}
+
+// Internal implementation of sibling nodes that return a raw slice of matches.
+func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
+ var f func(*html.Node) bool
+
+ // If the requested siblings are ...Until, create the test function to
+ // determine if the until condition is reached (returns true if it is)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ f = func(n *html.Node) bool {
+ if untilm != nil {
+ // Matcher-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsMatcher(untilm)
+ } else if len(untilNodes) > 0 {
+ // Nodes-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsNodes(untilNodes...)
+ }
+ return false
+ }
+ }
+
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n.Parent, st, n, f)
+ })
+}
+
+// Gets the children nodes of each node in the specified slice of nodes,
+// based on the sibling type request.
+func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n, st, nil, nil)
+ })
+}
+
+// Gets the children of the specified parent, based on the requested sibling
+// type, skipping a specified node if required.
+func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
+ untilFunc func(*html.Node) bool) (result []*html.Node) {
+
+ // Create the iterator function
+ var iter = func(cur *html.Node) (ret *html.Node) {
+ // Based on the sibling type requested, iterate the right way
+ for {
+ switch st {
+ case siblingAll, siblingAllIncludingNonElements:
+ if cur == nil {
+ // First iteration, start with first child of parent
+ // Skip node if required
+ if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ } else {
+ // Skip node if required
+ if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ }
+ case siblingPrev, siblingPrevAll, siblingPrevUntil:
+ if cur == nil {
+ // Start with previous sibling of the skip node
+ ret = skipNode.PrevSibling
+ } else {
+ ret = cur.PrevSibling
+ }
+ case siblingNext, siblingNextAll, siblingNextUntil:
+ if cur == nil {
+ // Start with next sibling of the skip node
+ ret = skipNode.NextSibling
+ } else {
+ ret = cur.NextSibling
+ }
+ default:
+ panic("Invalid sibling type.")
+ }
+ if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
+ return
+ }
+ // Not a valid node, try again from this one
+ cur = ret
+ }
+ }
+
+ for c := iter(nil); c != nil; c = iter(c) {
+ // If this is an ...Until case, test before append (returns true
+ // if the until condition is reached)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ if untilFunc(c) {
+ return
+ }
+ }
+ result = append(result, c)
+ if st == siblingNext || st == siblingPrev {
+ // Only one node was requested (immediate next or previous), so exit
+ return
+ }
+ }
+ return
+}
+
+// Internal implementation of parent nodes that return a raw slice of Nodes.
+func getParentNodes(nodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ if n.Parent != nil && n.Parent.Type == html.ElementNode {
+ return []*html.Node{n.Parent}
+ }
+ return nil
+ })
+}
+
+// Internal map function used by many traversing methods. Takes the source nodes
+// to iterate on and the mapping function that returns an array of nodes.
+// Returns an array of nodes mapped by calling the callback function once for
+// each node in the source nodes.
+func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
+ set := make(map[*html.Node]bool)
+ for i, n := range nodes {
+ if vals := f(i, n); len(vals) > 0 {
+ result = appendWithoutDuplicates(result, vals, set)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/type.go b/vendor/github.com/PuerkitoBio/goquery/type.go
new file mode 100644
index 000000000..6ad51dbc5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/type.go
@@ -0,0 +1,141 @@
+package goquery
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/andybalholm/cascadia"
+
+ "golang.org/x/net/html"
+)
+
+// Document represents an HTML document to be manipulated. Unlike jQuery, which
+// is loaded as part of a DOM document, and thus acts upon its containing
+// document, GoQuery doesn't know which HTML document to act upon. So it needs
+// to be told, and that's what the Document class is for. It holds the root
+// document node to manipulate, and can make selections on this document.
+type Document struct {
+ *Selection
+ Url *url.URL
+ rootNode *html.Node
+}
+
+// NewDocumentFromNode is a Document constructor that takes a root html Node
+// as argument.
+func NewDocumentFromNode(root *html.Node) *Document {
+ return newDocument(root, nil)
+}
+
+// NewDocument is a Document constructor that takes a string URL as argument.
+// It loads the specified document, parses it, and stores the root Document
+// node, ready to be manipulated.
+//
+// Deprecated: Use the net/http standard library package to make the request
+// and validate the response before calling goquery.NewDocumentFromReader
+// with the response's body.
+func NewDocument(url string) (*Document, error) {
+ // Load the URL
+ res, e := http.Get(url)
+ if e != nil {
+ return nil, e
+ }
+ return NewDocumentFromResponse(res)
+}
+
+// NewDocumentFromReader returns a Document from an io.Reader.
+// It returns an error as second value if the reader's data cannot be parsed
+// as html. It does not check if the reader is also an io.Closer, the
+// provided reader is never closed by this call. It is the responsibility
+// of the caller to close it if required.
+func NewDocumentFromReader(r io.Reader) (*Document, error) {
+ root, e := html.Parse(r)
+ if e != nil {
+ return nil, e
+ }
+ return newDocument(root, nil), nil
+}
+
+// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
+// It loads the specified response's document, parses it, and stores the root Document
+// node, ready to be manipulated. The response's body is closed on return.
+//
+// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
+func NewDocumentFromResponse(res *http.Response) (*Document, error) {
+ if res == nil {
+ return nil, errors.New("Response is nil")
+ }
+ defer res.Body.Close()
+ if res.Request == nil {
+ return nil, errors.New("Response.Request is nil")
+ }
+
+ // Parse the HTML into nodes
+ root, e := html.Parse(res.Body)
+ if e != nil {
+ return nil, e
+ }
+
+ // Create and fill the document
+ return newDocument(root, res.Request.URL), nil
+}
+
+// CloneDocument creates a deep-clone of a document.
+func CloneDocument(doc *Document) *Document {
+ return newDocument(cloneNode(doc.rootNode), doc.Url)
+}
+
+// Private constructor, make sure all fields are correctly filled.
+func newDocument(root *html.Node, url *url.URL) *Document {
+ // Create and fill the document
+ d := &Document{nil, url, root}
+ d.Selection = newSingleSelection(root, d)
+ return d
+}
+
+// Selection represents a collection of nodes matching some criteria. The
+// initial Selection can be created by using Document.Find, and then
+// manipulated using the jQuery-like chainable syntax and methods.
+type Selection struct {
+ Nodes []*html.Node
+ document *Document
+ prevSel *Selection
+}
+
+// Helper constructor to create an empty selection
+func newEmptySelection(doc *Document) *Selection {
+ return &Selection{nil, doc, nil}
+}
+
+// Helper constructor to create a selection of only one node
+func newSingleSelection(node *html.Node, doc *Document) *Selection {
+ return &Selection{[]*html.Node{node}, doc, nil}
+}
+
+// Matcher is an interface that defines the methods to match
+// HTML nodes against a compiled selector string. Cascadia's
+// Selector implements this interface.
+type Matcher interface {
+ Match(*html.Node) bool
+ MatchAll(*html.Node) []*html.Node
+ Filter([]*html.Node) []*html.Node
+}
+
+// compileMatcher compiles the selector string s and returns
+// the corresponding Matcher. If s is an invalid selector string,
+// it returns a Matcher that fails all matches.
+func compileMatcher(s string) Matcher {
+ cs, err := cascadia.Compile(s)
+ if err != nil {
+ return invalidMatcher{}
+ }
+ return cs
+}
+
+// invalidMatcher is a Matcher that always fails to match.
+type invalidMatcher struct{}
+
+func (invalidMatcher) Match(n *html.Node) bool { return false }
+func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
+func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities.go b/vendor/github.com/PuerkitoBio/goquery/utilities.go
new file mode 100644
index 000000000..b4c061a4d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/utilities.go
@@ -0,0 +1,161 @@
+package goquery
+
+import (
+ "bytes"
+
+ "golang.org/x/net/html"
+)
+
+// used to determine if a set (map[*html.Node]bool) should be used
+// instead of iterating over a slice. The set uses more memory and
+// is slower than slice iteration for small N.
+const minNodesForSet = 1000
+
+var nodeNames = []string{
+ html.ErrorNode: "#error",
+ html.TextNode: "#text",
+ html.DocumentNode: "#document",
+ html.CommentNode: "#comment",
+}
+
+// NodeName returns the node name of the first element in the selection.
+// It tries to behave in a similar way as the DOM's nodeName property
+// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
+//
+// Go's net/html package defines the following node types, listed with
+// the corresponding returned value from this function:
+//
+// ErrorNode : #error
+// TextNode : #text
+// DocumentNode : #document
+// ElementNode : the element's tag name
+// CommentNode : #comment
+// DoctypeNode : the name of the document type
+//
+func NodeName(s *Selection) string {
+ if s.Length() == 0 {
+ return ""
+ }
+ switch n := s.Get(0); n.Type {
+ case html.ElementNode, html.DoctypeNode:
+ return n.Data
+ default:
+ if n.Type >= 0 && int(n.Type) < len(nodeNames) {
+ return nodeNames[n.Type]
+ }
+ return ""
+ }
+}
+
+// OuterHtml returns the outer HTML rendering of the first item in
+// the selection - that is, the HTML including the first element's
+// tag and attributes.
+//
+// Unlike InnerHtml, this is a function and not a method on the Selection,
+// because this is not a jQuery method (in javascript-land, this is
+// a property provided by the DOM).
+func OuterHtml(s *Selection) (string, error) {
+ var buf bytes.Buffer
+
+ if s.Length() == 0 {
+ return "", nil
+ }
+ n := s.Get(0)
+ if err := html.Render(&buf, n); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// Loop through all container nodes to search for the target node.
+func sliceContains(container []*html.Node, contained *html.Node) bool {
+ for _, n := range container {
+ if nodeContains(n, contained) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Checks if the contained node is within the container node.
+func nodeContains(container *html.Node, contained *html.Node) bool {
+ // Check if the parent of the contained node is the container node, traversing
+ // upward until the top is reached, or the container is found.
+ for contained = contained.Parent; contained != nil; contained = contained.Parent {
+ if container == contained {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if the target node is in the slice of nodes.
+func isInSlice(slice []*html.Node, node *html.Node) bool {
+ return indexInSlice(slice, node) > -1
+}
+
+// Returns the index of the target node in the slice, or -1.
+func indexInSlice(slice []*html.Node, node *html.Node) int {
+ if node != nil {
+ for i, n := range slice {
+ if n == node {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// Appends the new nodes to the target slice, making sure no duplicate is added.
+// There is no check to the original state of the target slice, so it may still
+// contain duplicates. The target slice is returned because append() may create
+// a new underlying array. If targetSet is nil, a local set is created with the
+// target if len(target) + len(nodes) is greater than minNodesForSet.
+func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
+ // if there are not that many nodes, don't use the map, faster to just use nested loops
+ // (unless a non-nil targetSet is passed, in which case the caller knows better).
+ if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
+ for _, n := range nodes {
+ if !isInSlice(target, n) {
+ target = append(target, n)
+ }
+ }
+ return target
+ }
+
+ // if a targetSet is passed, then assume it is reliable, otherwise create one
+ // and initialize it with the current target contents.
+ if targetSet == nil {
+ targetSet = make(map[*html.Node]bool, len(target))
+ for _, n := range target {
+ targetSet[n] = true
+ }
+ }
+ for _, n := range nodes {
+ if !targetSet[n] {
+ target = append(target, n)
+ targetSet[n] = true
+ }
+ }
+
+ return target
+}
+
+// Loop through a selection, returning only those nodes that pass the predicate
+// function.
+func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
+ for i, n := range sel.Nodes {
+ if predicate(i, newSingleSelection(n, sel.document)) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+// Creates a new Selection object based on the specified nodes, and keeps the
+// source Selection object on the stack (linked list).
+func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
+ result := &Selection{nodes, fromSel.document, fromSel}
+ return result
+}
diff --git a/vendor/github.com/andybalholm/cascadia/.travis.yml b/vendor/github.com/andybalholm/cascadia/.travis.yml
new file mode 100644
index 000000000..6f227517d
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+
+install:
+ - go get github.com/andybalholm/cascadia
+
+script:
+ - go test -v
+
+notifications:
+ email: false
diff --git a/vendor/github.com/andybalholm/cascadia/LICENSE b/vendor/github.com/andybalholm/cascadia/LICENSE
new file mode 100644
index 000000000..ee5ad35ac
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2011 Andy Balholm. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/andybalholm/cascadia/README.md b/vendor/github.com/andybalholm/cascadia/README.md
new file mode 100644
index 000000000..9021cb92a
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/README.md
@@ -0,0 +1,7 @@
+# cascadia
+
+[![](https://travis-ci.org/andybalholm/cascadia.svg)](https://travis-ci.org/andybalholm/cascadia)
+
+The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
+
+To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
diff --git a/vendor/github.com/andybalholm/cascadia/go.mod b/vendor/github.com/andybalholm/cascadia/go.mod
new file mode 100644
index 000000000..e6febbbfe
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/go.mod
@@ -0,0 +1,3 @@
+module "github.com/andybalholm/cascadia"
+
+require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01
diff --git a/vendor/github.com/andybalholm/cascadia/parser.go b/vendor/github.com/andybalholm/cascadia/parser.go
new file mode 100644
index 000000000..495db9ccf
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/parser.go
@@ -0,0 +1,835 @@
+// Package cascadia is an implementation of CSS selectors.
+package cascadia
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// a parser for CSS selectors
+type parser struct {
+ s string // the source text
+ i int // the current position
+}
+
+// parseEscape parses a backslash escape.
+func (p *parser) parseEscape() (result string, err error) {
+ if len(p.s) < p.i+2 || p.s[p.i] != '\\' {
+ return "", errors.New("invalid escape sequence")
+ }
+
+ start := p.i + 1
+ c := p.s[start]
+ switch {
+ case c == '\r' || c == '\n' || c == '\f':
+ return "", errors.New("escaped line ending outside string")
+ case hexDigit(c):
+ // unicode escape (hex)
+ var i int
+ for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
+ // empty
+ }
+ v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
+ if len(p.s) > i {
+ switch p.s[i] {
+ case '\r':
+ i++
+ if len(p.s) > i && p.s[i] == '\n' {
+ i++
+ }
+ case ' ', '\t', '\n', '\f':
+ i++
+ }
+ }
+ p.i = i
+ return string(rune(v)), nil
+ }
+
+ // Return the literal character after the backslash.
+ result = p.s[start : start+1]
+ p.i += 2
+ return result, nil
+}
+
+func hexDigit(c byte) bool {
+ return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
+}
+
+// nameStart returns whether c can be the first character of an identifier
+// (not counting an initial hyphen, or an escape sequence).
+func nameStart(c byte) bool {
+ return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127
+}
+
+// nameChar returns whether c can be a character within an identifier
+// (not counting an escape sequence).
+func nameChar(c byte) bool {
+ return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 ||
+ c == '-' || '0' <= c && c <= '9'
+}
+
+// parseIdentifier parses an identifier.
+func (p *parser) parseIdentifier() (result string, err error) {
+ startingDash := false
+ if len(p.s) > p.i && p.s[p.i] == '-' {
+ startingDash = true
+ p.i++
+ }
+
+ if len(p.s) <= p.i {
+ return "", errors.New("expected identifier, found EOF instead")
+ }
+
+ if c := p.s[p.i]; !(nameStart(c) || c == '\\') {
+ return "", fmt.Errorf("expected identifier, found %c instead", c)
+ }
+
+ result, err = p.parseName()
+ if startingDash && err == nil {
+ result = "-" + result
+ }
+ return
+}
+
+// parseName parses a name (which is like an identifier, but doesn't have
+// extra restrictions on the first character).
+func (p *parser) parseName() (result string, err error) {
+ i := p.i
+loop:
+ for i < len(p.s) {
+ c := p.s[i]
+ switch {
+ case nameChar(c):
+ start := i
+ for i < len(p.s) && nameChar(p.s[i]) {
+ i++
+ }
+ result += p.s[start:i]
+ case c == '\\':
+ p.i = i
+ val, err := p.parseEscape()
+ if err != nil {
+ return "", err
+ }
+ i = p.i
+ result += val
+ default:
+ break loop
+ }
+ }
+
+ if result == "" {
+ return "", errors.New("expected name, found EOF instead")
+ }
+
+ p.i = i
+ return result, nil
+}
+
+// parseString parses a single- or double-quoted string.
+func (p *parser) parseString() (result string, err error) {
+ i := p.i
+ if len(p.s) < i+2 {
+ return "", errors.New("expected string, found EOF instead")
+ }
+
+ quote := p.s[i]
+ i++
+
+loop:
+ for i < len(p.s) {
+ switch p.s[i] {
+ case '\\':
+ if len(p.s) > i+1 {
+ switch c := p.s[i+1]; c {
+ case '\r':
+ if len(p.s) > i+2 && p.s[i+2] == '\n' {
+ i += 3
+ continue loop
+ }
+ fallthrough
+ case '\n', '\f':
+ i += 2
+ continue loop
+ }
+ }
+ p.i = i
+ val, err := p.parseEscape()
+ if err != nil {
+ return "", err
+ }
+ i = p.i
+ result += val
+ case quote:
+ break loop
+ case '\r', '\n', '\f':
+ return "", errors.New("unexpected end of line in string")
+ default:
+ start := i
+ for i < len(p.s) {
+ if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' {
+ break
+ }
+ i++
+ }
+ result += p.s[start:i]
+ }
+ }
+
+ if i >= len(p.s) {
+ return "", errors.New("EOF in string")
+ }
+
+ // Consume the final quote.
+ i++
+
+ p.i = i
+ return result, nil
+}
+
+// parseRegex parses a regular expression; the end is defined by encountering an
+// unmatched closing ')' or ']' which is not consumed
+func (p *parser) parseRegex() (rx *regexp.Regexp, err error) {
+ i := p.i
+ if len(p.s) < i+2 {
+ return nil, errors.New("expected regular expression, found EOF instead")
+ }
+
+ // number of open parens or brackets;
+ // when it becomes negative, finished parsing regex
+ open := 0
+
+loop:
+ for i < len(p.s) {
+ switch p.s[i] {
+ case '(', '[':
+ open++
+ case ')', ']':
+ open--
+ if open < 0 {
+ break loop
+ }
+ }
+ i++
+ }
+
+ if i >= len(p.s) {
+ return nil, errors.New("EOF in regular expression")
+ }
+ rx, err = regexp.Compile(p.s[p.i:i])
+ p.i = i
+ return rx, err
+}
+
+// skipWhitespace consumes whitespace characters and comments.
+// It returns true if there was actually anything to skip.
+func (p *parser) skipWhitespace() bool {
+ i := p.i
+ for i < len(p.s) {
+ switch p.s[i] {
+ case ' ', '\t', '\r', '\n', '\f':
+ i++
+ continue
+ case '/':
+ if strings.HasPrefix(p.s[i:], "/*") {
+ end := strings.Index(p.s[i+len("/*"):], "*/")
+ if end != -1 {
+ i += end + len("/**/")
+ continue
+ }
+ }
+ }
+ break
+ }
+
+ if i > p.i {
+ p.i = i
+ return true
+ }
+
+ return false
+}
+
+// consumeParenthesis consumes an opening parenthesis and any following
+// whitespace. It returns true if there was actually a parenthesis to skip.
+func (p *parser) consumeParenthesis() bool {
+ if p.i < len(p.s) && p.s[p.i] == '(' {
+ p.i++
+ p.skipWhitespace()
+ return true
+ }
+ return false
+}
+
+// consumeClosingParenthesis consumes a closing parenthesis and any preceding
+// whitespace. It returns true if there was actually a parenthesis to skip.
+func (p *parser) consumeClosingParenthesis() bool {
+ i := p.i
+ p.skipWhitespace()
+ if p.i < len(p.s) && p.s[p.i] == ')' {
+ p.i++
+ return true
+ }
+ p.i = i
+ return false
+}
+
+// parseTypeSelector parses a type selector (one that matches by tag name).
+func (p *parser) parseTypeSelector() (result Selector, err error) {
+ tag, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ return typeSelector(tag), nil
+}
+
+// parseIDSelector parses a selector that matches by id attribute.
+func (p *parser) parseIDSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected id selector (#id), found EOF instead")
+ }
+ if p.s[p.i] != '#' {
+ return nil, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ id, err := p.parseName()
+ if err != nil {
+ return nil, err
+ }
+
+ return attributeEqualsSelector("id", id), nil
+}
+
+// parseClassSelector parses a selector that matches by class attribute.
+func (p *parser) parseClassSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected class selector (.class), found EOF instead")
+ }
+ if p.s[p.i] != '.' {
+ return nil, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ class, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ return attributeIncludesSelector("class", class), nil
+}
+
+// parseAttributeSelector parses a selector that matches by attribute value.
+func (p *parser) parseAttributeSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
+ }
+ if p.s[p.i] != '[' {
+ return nil, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ p.skipWhitespace()
+ key, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+
+ if p.s[p.i] == ']' {
+ p.i++
+ return attributeExistsSelector(key), nil
+ }
+
+ if p.i+2 >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+
+ op := p.s[p.i : p.i+2]
+ if op[0] == '=' {
+ op = "="
+ } else if op[1] != '=' {
+ return nil, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
+ }
+ p.i += len(op)
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+ var val string
+ var rx *regexp.Regexp
+ if op == "#=" {
+ rx, err = p.parseRegex()
+ } else {
+ switch p.s[p.i] {
+ case '\'', '"':
+ val, err = p.parseString()
+ default:
+ val, err = p.parseIdentifier()
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+ if p.s[p.i] != ']' {
+ return nil, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
+ }
+ p.i++
+
+ switch op {
+ case "=":
+ return attributeEqualsSelector(key, val), nil
+ case "!=":
+ return attributeNotEqualSelector(key, val), nil
+ case "~=":
+ return attributeIncludesSelector(key, val), nil
+ case "|=":
+ return attributeDashmatchSelector(key, val), nil
+ case "^=":
+ return attributePrefixSelector(key, val), nil
+ case "$=":
+ return attributeSuffixSelector(key, val), nil
+ case "*=":
+ return attributeSubstringSelector(key, val), nil
+ case "#=":
+ return attributeRegexSelector(key, rx), nil
+ }
+
+ return nil, fmt.Errorf("attribute operator %q is not supported", op)
+}
+
+var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
+var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
+var errUnmatchedParenthesis = errors.New("unmatched '('")
+
+// parsePseudoclassSelector parses a pseudoclass selector like :not(p).
+func (p *parser) parsePseudoclassSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
+ }
+ if p.s[p.i] != ':' {
+ return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ name, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+ name = toLowerASCII(name)
+
+ switch name {
+ case "not", "has", "haschild":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ sel, parseErr := p.parseSelectorGroup()
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "not":
+ return negatedSelector(sel), nil
+ case "has":
+ return hasDescendantSelector(sel), nil
+ case "haschild":
+ return hasChildSelector(sel), nil
+ }
+
+ case "contains", "containsown":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ if p.i == len(p.s) {
+ return nil, errUnmatchedParenthesis
+ }
+ var val string
+ switch p.s[p.i] {
+ case '\'', '"':
+ val, err = p.parseString()
+ default:
+ val, err = p.parseIdentifier()
+ }
+ if err != nil {
+ return nil, err
+ }
+ val = strings.ToLower(val)
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in pseudo selector")
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "contains":
+ return textSubstrSelector(val), nil
+ case "containsown":
+ return ownTextSubstrSelector(val), nil
+ }
+
+ case "matches", "matchesown":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ rx, err := p.parseRegex()
+ if err != nil {
+ return nil, err
+ }
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in pseudo selector")
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "matches":
+ return textRegexSelector(rx), nil
+ case "matchesown":
+ return ownTextRegexSelector(rx), nil
+ }
+
+ case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ a, b, err := p.parseNth()
+ if err != nil {
+ return nil, err
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+ if a == 0 {
+ switch name {
+ case "nth-child":
+ return simpleNthChildSelector(b, false), nil
+ case "nth-of-type":
+ return simpleNthChildSelector(b, true), nil
+ case "nth-last-child":
+ return simpleNthLastChildSelector(b, false), nil
+ case "nth-last-of-type":
+ return simpleNthLastChildSelector(b, true), nil
+ }
+ }
+ return nthChildSelector(a, b,
+ name == "nth-last-child" || name == "nth-last-of-type",
+ name == "nth-of-type" || name == "nth-last-of-type"),
+ nil
+
+ case "first-child":
+ return simpleNthChildSelector(1, false), nil
+ case "last-child":
+ return simpleNthLastChildSelector(1, false), nil
+ case "first-of-type":
+ return simpleNthChildSelector(1, true), nil
+ case "last-of-type":
+ return simpleNthLastChildSelector(1, true), nil
+ case "only-child":
+ return onlyChildSelector(false), nil
+ case "only-of-type":
+ return onlyChildSelector(true), nil
+ case "input":
+ return inputSelector, nil
+ case "empty":
+ return emptyElementSelector, nil
+ case "root":
+ return rootSelector, nil
+ }
+
+ return nil, fmt.Errorf("unknown pseudoclass :%s", name)
+}
+
+// parseInteger parses a decimal integer.
+//
+// It reads a maximal run of ASCII digits starting at p.i, advances p.i
+// past them, and returns the parsed value. If no digit is present at the
+// current position, an error is returned and p.i is left unchanged.
+// NOTE(review): an absurdly long digit run overflows strconv.Atoi and is
+// surfaced as its error.
+func (p *parser) parseInteger() (int, error) {
+ i := p.i
+ start := i
+ for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' {
+ i++
+ }
+ if i == start {
+ return 0, errors.New("expected integer, but didn't find it")
+ }
+ p.i = i
+
+ val, err := strconv.Atoi(p.s[start:i])
+ if err != nil {
+ return 0, err
+ }
+
+ return val, nil
+}
+
+// parseNth parses the argument for :nth-child (normally of the form an+b).
+//
+// Implemented as a small goto-based state machine over p.s starting at
+// p.i: an optional sign, an optional coefficient a, an optional 'n'/'N',
+// and an optional signed constant b, plus the keyword forms "odd" (2n+1)
+// and "even" (2n+0). On success p.i is left just past the consumed
+// expression.
+func (p *parser) parseNth() (a, b int, err error) {
+ // initial state
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '-':
+ p.i++
+ goto negativeA
+ case '+':
+ p.i++
+ goto positiveA
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ goto positiveA
+ case 'n', 'N':
+ a = 1
+ p.i++
+ goto readN
+ case 'o', 'O', 'e', 'E':
+ // Keyword form: "odd" or "even", matched case-insensitively.
+ id, nameErr := p.parseName()
+ if nameErr != nil {
+ return 0, 0, nameErr
+ }
+ id = toLowerASCII(id)
+ if id == "odd" {
+ return 2, 1, nil
+ }
+ if id == "even" {
+ return 2, 0, nil
+ }
+ return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id)
+ default:
+ goto invalid
+ }
+
+positiveA:
+ // After a leading '+' or digit: read a non-negative coefficient, or a
+ // bare 'n' meaning a == 1.
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ a, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ goto readA
+ case 'n', 'N':
+ a = 1
+ p.i++
+ goto readN
+ default:
+ goto invalid
+ }
+
+negativeA:
+ // After a leading '-': read the coefficient and negate it, or a bare
+ // 'n' meaning a == -1.
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ a, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ a = -a
+ goto readA
+ case 'n', 'N':
+ a = -1
+ p.i++
+ goto readN
+ default:
+ goto invalid
+ }
+
+readA:
+ // A signed number was read. It is the coefficient a only if an 'n'
+ // follows; otherwise the whole expression was just b.
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case 'n', 'N':
+ p.i++
+ goto readN
+ default:
+ // The number we read as a is actually b.
+ return 0, a, nil
+ }
+
+readN:
+ // 'n' was consumed; optionally read "+ b" or "- b" (whitespace allowed
+ // around the sign). A missing constant means b == 0.
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '+':
+ p.i++
+ p.skipWhitespace()
+ b, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ return a, b, nil
+ case '-':
+ p.i++
+ p.skipWhitespace()
+ b, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ return a, -b, nil
+ default:
+ return a, 0, nil
+ }
+
+eof:
+ return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b")
+
+invalid:
+ return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b")
+}
+
+// parseSimpleSelectorSequence parses a selector sequence that applies to
+// a single element, e.g. "div#id.class[attr]:pseudo": an optional type
+// (or universal) selector followed by any number of ID, class, attribute
+// and pseudo-class selectors, all intersected together.
+func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
+ var result Selector
+
+ if p.i >= len(p.s) {
+ return nil, errors.New("expected selector, found EOF instead")
+ }
+
+ switch p.s[p.i] {
+ case '*':
+ // It's the universal selector. Just skip over it, since it doesn't affect the meaning.
+ p.i++
+ case '#', '.', '[', ':':
+ // There's no type selector. Wait to process the other till the main loop.
+ default:
+ r, err := p.parseTypeSelector()
+ if err != nil {
+ return nil, err
+ }
+ result = r
+ }
+
+loop:
+ for p.i < len(p.s) {
+ var ns Selector
+ var err error
+ switch p.s[p.i] {
+ case '#':
+ ns, err = p.parseIDSelector()
+ case '.':
+ ns, err = p.parseClassSelector()
+ case '[':
+ ns, err = p.parseAttributeSelector()
+ case ':':
+ ns, err = p.parsePseudoclassSelector()
+ default:
+ // Anything else (whitespace, combinator, comma) ends the sequence.
+ break loop
+ }
+ if err != nil {
+ return nil, err
+ }
+ if result == nil {
+ result = ns
+ } else {
+ result = intersectionSelector(result, ns)
+ }
+ }
+
+ if result == nil {
+ // A bare universal selector: match any element node.
+ result = func(n *html.Node) bool {
+ return n.Type == html.ElementNode
+ }
+ }
+
+ return result, nil
+}
+
+// parseSelector parses a selector that may include combinators.
+//
+// It repeatedly parses simple selector sequences and joins consecutive
+// sequences with the combinator found between them: descendant (space),
+// child ('>'), adjacent sibling ('+') or general sibling ('~').
+func (p *parser) parseSelector() (result Selector, err error) {
+ p.skipWhitespace()
+ result, err = p.parseSimpleSelectorSequence()
+ if err != nil {
+ return
+ }
+
+ for {
+ var combinator byte
+ // Whitespace between sequences implies the descendant combinator,
+ // unless an explicit combinator character follows.
+ if p.skipWhitespace() {
+ combinator = ' '
+ }
+ if p.i >= len(p.s) {
+ return
+ }
+
+ switch p.s[p.i] {
+ case '+', '>', '~':
+ combinator = p.s[p.i]
+ p.i++
+ p.skipWhitespace()
+ case ',', ')':
+ // These characters can't begin a selector, but they can legally occur after one.
+ return
+ }
+
+ // No combinator at all means the selector ends here.
+ if combinator == 0 {
+ return
+ }
+
+ c, err := p.parseSimpleSelectorSequence()
+ if err != nil {
+ return nil, err
+ }
+
+ switch combinator {
+ case ' ':
+ result = descendantSelector(result, c)
+ case '>':
+ result = childSelector(result, c)
+ case '+':
+ result = siblingSelector(result, c, true)
+ case '~':
+ result = siblingSelector(result, c, false)
+ }
+ }
+
+ panic("unreachable")
+}
+
+// parseSelectorGroup parses a group of selectors, separated by commas,
+// and returns their union. Parsing stops without error at the first
+// character that is neither part of a selector nor a comma.
+func (p *parser) parseSelectorGroup() (result Selector, err error) {
+ result, err = p.parseSelector()
+ if err != nil {
+ return
+ }
+
+ for p.i < len(p.s) {
+ if p.s[p.i] != ',' {
+ return result, nil
+ }
+ p.i++
+ c, err := p.parseSelector()
+ if err != nil {
+ return nil, err
+ }
+ result = unionSelector(result, c)
+ }
+
+ return
+}
diff --git a/vendor/github.com/andybalholm/cascadia/selector.go b/vendor/github.com/andybalholm/cascadia/selector.go
new file mode 100644
index 000000000..9fb05ccb7
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/selector.go
@@ -0,0 +1,622 @@
+package cascadia
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// the Selector type, and functions for creating them
+
+// A Selector is a function which tells whether a node matches or not.
+type Selector func(*html.Node) bool
+
+// hasChildMatch returns whether n has any child that matches a.
+func hasChildMatch(n *html.Node, a Selector) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// hasDescendantMatch performs a depth-first search of n's descendants,
+// testing whether any of them match a. It returns true as soon as a match is
+// found, or false if no match is found.
+func hasDescendantMatch(n *html.Node, a Selector) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Compile parses a selector and returns, if successful, a Selector object
+// that can be used to match against html.Node objects.
+func Compile(sel string) (Selector, error) {
+ p := &parser{s: sel}
+ compiled, err := p.parseSelectorGroup()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
+ }
+
+ return compiled, nil
+}
+
+// MustCompile is like Compile, but panics instead of returning an error.
+func MustCompile(sel string) Selector {
+ compiled, err := Compile(sel)
+ if err != nil {
+ panic(err)
+ }
+ return compiled
+}
+
+// MatchAll returns a slice of the nodes that match the selector,
+// from n and its children.
+func (s Selector) MatchAll(n *html.Node) []*html.Node {
+ return s.matchAllInto(n, nil)
+}
+
+func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node {
+ if s(n) {
+ storage = append(storage, n)
+ }
+
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ storage = s.matchAllInto(child, storage)
+ }
+
+ return storage
+}
+
+// Match returns true if the node matches the selector.
+func (s Selector) Match(n *html.Node) bool {
+ return s(n)
+}
+
+// MatchFirst returns the first node that matches s, from n and its children.
+func (s Selector) MatchFirst(n *html.Node) *html.Node {
+ if s.Match(n) {
+ return n
+ }
+
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ m := s.MatchFirst(c)
+ if m != nil {
+ return m
+ }
+ }
+ return nil
+}
+
+// Filter returns the nodes in nodes that match the selector.
+func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
+ for _, n := range nodes {
+ if s(n) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+// typeSelector returns a Selector that matches elements with a given tag name.
+func typeSelector(tag string) Selector {
+ tag = toLowerASCII(tag)
+ return func(n *html.Node) bool {
+ return n.Type == html.ElementNode && n.Data == tag
+ }
+}
+
+// toLowerASCII returns s with all ASCII capital letters lowercased.
+func toLowerASCII(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; 'A' <= c && c <= 'Z' {
+ if b == nil {
+ b = make([]byte, len(s))
+ copy(b, s)
+ }
+ b[i] = s[i] + ('a' - 'A')
+ }
+ }
+
+ if b == nil {
+ return s
+ }
+
+ return string(b)
+}
+
+// attributeSelector returns a Selector that matches elements
+// where the attribute named key satisfies the function f.
+func attributeSelector(key string, f func(string) bool) Selector {
+ key = toLowerASCII(key)
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && f(a.Val) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// attributeExistsSelector returns a Selector that matches elements that have
+// an attribute named key.
+func attributeExistsSelector(key string) Selector {
+ return attributeSelector(key, func(string) bool { return true })
+}
+
+// attributeEqualsSelector returns a Selector that matches elements where
+// the attribute named key has the value val.
+func attributeEqualsSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ return s == val
+ })
+}
+
+// attributeNotEqualSelector returns a Selector that matches elements where
+// the attribute named key does not have the value val.
+func attributeNotEqualSelector(key, val string) Selector {
+ key = toLowerASCII(key)
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && a.Val == val {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// attributeIncludesSelector returns a Selector that matches elements where
+// the attribute named key is a whitespace-separated list that includes val.
+func attributeIncludesSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ for s != "" {
+ i := strings.IndexAny(s, " \t\r\n\f")
+ if i == -1 {
+ return s == val
+ }
+ if s[:i] == val {
+ return true
+ }
+ s = s[i+1:]
+ }
+ return false
+ })
+}
+
+// attributeDashmatchSelector returns a Selector that matches elements where
+// the attribute named key equals val or starts with val plus a hyphen.
+func attributeDashmatchSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if s == val {
+ return true
+ }
+ if len(s) <= len(val) {
+ return false
+ }
+ if s[:len(val)] == val && s[len(val)] == '-' {
+ return true
+ }
+ return false
+ })
+}
+
+// attributePrefixSelector returns a Selector that matches elements where
+// the attribute named key starts with val.
+func attributePrefixSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.HasPrefix(s, val)
+ })
+}
+
+// attributeSuffixSelector returns a Selector that matches elements where
+// the attribute named key ends with val.
+func attributeSuffixSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.HasSuffix(s, val)
+ })
+}
+
+// attributeSubstringSelector returns a Selector that matches nodes where
+// the attribute named key contains val.
+func attributeSubstringSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.Contains(s, val)
+ })
+}
+
+// attributeRegexSelector returns a Selector that matches nodes where
+// the attribute named key matches the regular expression rx
+func attributeRegexSelector(key string, rx *regexp.Regexp) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ return rx.MatchString(s)
+ })
+}
+
+// intersectionSelector returns a selector that matches nodes that match
+// both a and b.
+func intersectionSelector(a, b Selector) Selector {
+ return func(n *html.Node) bool {
+ return a(n) && b(n)
+ }
+}
+
+// unionSelector returns a selector that matches elements that match
+// either a or b.
+func unionSelector(a, b Selector) Selector {
+ return func(n *html.Node) bool {
+ return a(n) || b(n)
+ }
+}
+
+// negatedSelector returns a selector that matches elements that do not match a.
+func negatedSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return !a(n)
+ }
+}
+
+// writeNodeText writes the text contained in n and its descendants to b.
+func writeNodeText(n *html.Node, b *bytes.Buffer) {
+ switch n.Type {
+ case html.TextNode:
+ b.WriteString(n.Data)
+ case html.ElementNode:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ writeNodeText(c, b)
+ }
+ }
+}
+
+// nodeText returns the text contained in n and its descendants.
+func nodeText(n *html.Node) string {
+ var b bytes.Buffer
+ writeNodeText(n, &b)
+ return b.String()
+}
+
+// nodeOwnText returns the contents of the text nodes that are direct
+// children of n.
+func nodeOwnText(n *html.Node) string {
+ var b bytes.Buffer
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == html.TextNode {
+ b.WriteString(c.Data)
+ }
+ }
+ return b.String()
+}
+
+// textSubstrSelector returns a selector that matches nodes that
+// contain the given text.
+func textSubstrSelector(val string) Selector {
+ return func(n *html.Node) bool {
+ text := strings.ToLower(nodeText(n))
+ return strings.Contains(text, val)
+ }
+}
+
+// ownTextSubstrSelector returns a selector that matches nodes that
+// directly contain the given text
+func ownTextSubstrSelector(val string) Selector {
+ return func(n *html.Node) bool {
+ text := strings.ToLower(nodeOwnText(n))
+ return strings.Contains(text, val)
+ }
+}
+
+// textRegexSelector returns a selector that matches nodes whose text matches
+// the specified regular expression
+func textRegexSelector(rx *regexp.Regexp) Selector {
+ return func(n *html.Node) bool {
+ return rx.MatchString(nodeText(n))
+ }
+}
+
+// ownTextRegexSelector returns a selector that matches nodes whose text
+// directly matches the specified regular expression
+func ownTextRegexSelector(rx *regexp.Regexp) Selector {
+ return func(n *html.Node) bool {
+ return rx.MatchString(nodeOwnText(n))
+ }
+}
+
+// hasChildSelector returns a selector that matches elements
+// with a child that matches a.
+func hasChildSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return hasChildMatch(n, a)
+ }
+}
+
+// hasDescendantSelector returns a selector that matches elements
+// with any descendant that matches a.
+func hasDescendantSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return hasDescendantMatch(n, a)
+ }
+}
+
+// nthChildSelector returns a selector that implements :nth-child(an+b).
+// If last is true, implements :nth-last-child instead.
+// If ofType is true, implements :nth-of-type instead.
+func nthChildSelector(a, b int, last, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ // i becomes n's 1-based position among the counted siblings
+ // (element nodes, optionally restricted to n's tag); count is the
+ // total number counted, needed only when counting from the end.
+ i := -1
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ i = count
+ if !last {
+ // Counting from the front: position known, stop early.
+ break
+ }
+ }
+ }
+
+ if i == -1 {
+ // This shouldn't happen, since n should always be one of its parent's children.
+ return false
+ }
+
+ if last {
+ // Convert to a 1-based position counted from the end.
+ i = count - i + 1
+ }
+
+ // Match when i == a*k + b for some non-negative integer k.
+ i -= b
+ if a == 0 {
+ return i == 0
+ }
+
+ return i%a == 0 && i/a >= 0
+ }
+}
+
+// simpleNthChildSelector returns a selector that implements :nth-child(b).
+// If ofType is true, implements :nth-of-type instead.
+func simpleNthChildSelector(b int, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
+ }
+}
+
+// simpleNthLastChildSelector returns a selector that implements
+// :nth-last-child(b). If ofType is true, implements :nth-last-of-type
+// instead.
+func simpleNthLastChildSelector(b int, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.LastChild; c != nil; c = c.PrevSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
+ }
+}
+
+// onlyChildSelector returns a selector that implements :only-child.
+// If ofType is true, it implements :only-of-type instead.
+func onlyChildSelector(ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+
+ return count == 1
+ }
+}
+
+// inputSelector is a Selector that matches input, select, textarea and button elements.
+func inputSelector(n *html.Node) bool {
+ return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
+}
+
+// emptyElementSelector is a Selector that matches empty elements.
+func emptyElementSelector(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ switch c.Type {
+ case html.ElementNode, html.TextNode:
+ return false
+ }
+ }
+
+ return true
+}
+
+// descendantSelector returns a Selector that matches an element if
+// it matches d and has an ancestor that matches a.
+func descendantSelector(a, d Selector) Selector {
+ return func(n *html.Node) bool {
+ if !d(n) {
+ return false
+ }
+
+ for p := n.Parent; p != nil; p = p.Parent {
+ if a(p) {
+ return true
+ }
+ }
+
+ return false
+ }
+}
+
+// childSelector returns a Selector that matches an element if
+// it matches d and its parent matches a.
+func childSelector(a, d Selector) Selector {
+ return func(n *html.Node) bool {
+ return d(n) && n.Parent != nil && a(n.Parent)
+ }
+}
+
+// siblingSelector returns a Selector that matches an element
+// if it matches s2 and it is preceded by an element that matches s1.
+// If adjacent is true, the sibling must be immediately before the element.
+func siblingSelector(s1, s2 Selector, adjacent bool) Selector {
+ return func(n *html.Node) bool {
+ if !s2(n) {
+ return false
+ }
+
+ if adjacent {
+ // "Adjacent" is relative to the nearest preceding sibling that
+ // is not a text or comment node; interleaved text/comments are
+ // skipped.
+ for n = n.PrevSibling; n != nil; n = n.PrevSibling {
+ if n.Type == html.TextNode || n.Type == html.CommentNode {
+ continue
+ }
+ return s1(n)
+ }
+ return false
+ }
+
+ // Walk backwards looking for element that matches s1
+ for c := n.PrevSibling; c != nil; c = c.PrevSibling {
+ if s1(c) {
+ return true
+ }
+ }
+
+ return false
+ }
+}
+
+// rootSelector implements :root
+func rootSelector(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ if n.Parent == nil {
+ return false
+ }
+ return n.Parent.Type == html.DocumentNode
+}
diff --git a/vendor/github.com/antchfx/htmlquery/.gitignore b/vendor/github.com/antchfx/htmlquery/.gitignore
new file mode 100644
index 000000000..4d5d27b1d
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/.gitignore
@@ -0,0 +1,32 @@
+# vscode
+.vscode
+debug
+*.test
+
+./build
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/.travis.yml b/vendor/github.com/antchfx/htmlquery/.travis.yml
new file mode 100644
index 000000000..1f7225628
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+
+install:
+ - go get golang.org/x/net/html/charset
+ - go get golang.org/x/net/html
+ - go get github.com/antchfx/xpath
+ - go get github.com/mattn/goveralls
+
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/LICENSE b/vendor/github.com/antchfx/htmlquery/LICENSE
new file mode 100644
index 000000000..e14c37141
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/README.md b/vendor/github.com/antchfx/htmlquery/README.md
new file mode 100644
index 000000000..0f466cb0f
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/README.md
@@ -0,0 +1,102 @@
+htmlquery
+====
+[![Build Status](https://travis-ci.org/antchfx/htmlquery.svg?branch=master)](https://travis-ci.org/antchfx/htmlquery)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/htmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/htmlquery?branch=master)
+[![GoDoc](https://godoc.org/github.com/antchfx/htmlquery?status.svg)](https://godoc.org/github.com/antchfx/htmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/htmlquery)](https://goreportcard.com/report/github.com/antchfx/htmlquery)
+
+Overview
+====
+
+htmlquery is an XPath query package for HTML; it lets you extract data from, or evaluate expressions against, HTML documents using XPath expressions.
+
+Changelogs
+===
+
+2019-02-04
+- [#7](https://github.com/antchfx/htmlquery/issues/7) Removed deprecated `FindEach()` and `FindEachWithBreak()` methods.
+
+2018-12-28
+- Avoid adding duplicate elements to list for `Find()` method. [#6](https://github.com/antchfx/htmlquery/issues/6)
+
+Installation
+====
+
+> $ go get github.com/antchfx/htmlquery
+
+Getting Started
+====
+
+#### Load HTML document from URL.
+
+```go
+doc, err := htmlquery.LoadURL("http://example.com/")
+```
+
+#### Load HTML document from string.
+
+```go
+s := `....`
+doc, err := htmlquery.Parse(strings.NewReader(s))
+```
+
+#### Find all A elements.
+
+```go
+list := htmlquery.Find(doc, "//a")
+```
+
+#### Find all A elements that have `href` attribute.
+
+```go
+list := htmlquery.Find(doc, "//a[@href]")
+```
+
+#### Find all A elements and get only the values of their `href` attributes.
+
+```go
+list := htmlquery.Find(doc, "//a/@href")
+```
+
+#### Find the third A element.
+
+```go
+a := htmlquery.FindOne(doc, "//a[3]")
+```
+
+#### Evaluate the number of all IMG elements.
+
+```go
+expr, _ := xpath.Compile("count(//img)")
+v := expr.Evaluate(htmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total count is %f", v)
+```
+
+Quick Tutorial
+===
+
+```go
+func main() {
+ doc, err := htmlquery.LoadURL("https://www.bing.com/search?q=golang")
+ if err != nil {
+ panic(err)
+ }
+ // Find all news items.
+ for i, n := range htmlquery.Find(doc, "//ol/li") {
+ a := htmlquery.FindOne(n, "//a")
+ fmt.Printf("%d %s(%s)\n", i, htmlquery.InnerText(a), htmlquery.SelectAttr(a, "href"))
+ }
+}
+```
+
+List of supported XPath query packages
+===
+|Name |Description |
+|--------------------------|----------------|
+|[htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document|
+|[xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document|
+|[jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document|
+
+Questions
+===
+Please let me know if you have any questions.
diff --git a/vendor/github.com/antchfx/htmlquery/query.go b/vendor/github.com/antchfx/htmlquery/query.go
new file mode 100644
index 000000000..37d30b937
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/query.go
@@ -0,0 +1,291 @@
+/*
+Package htmlquery provides functions for extracting data from HTML documents using XPath expressions.
+*/
+package htmlquery
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/antchfx/xpath"
+ "golang.org/x/net/html"
+ "golang.org/x/net/html/charset"
+)
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
+func CreateXPathNavigator(top *html.Node) *NodeNavigator {
+ return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+// Find searches the html.Node that matches by the specified XPath expr,
+// returning all matches in document order. It panics if expr is not a
+// valid XPath expression.
+func Find(top *html.Node, expr string) []*html.Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ var elems []*html.Node
+ t := exp.Select(CreateXPathNavigator(top))
+ for t.MoveNext() {
+ nav := t.Current().(*NodeNavigator)
+ n := getCurrentNode(nav)
+ // avoid adding duplicate nodes.
+ // NOTE(review): only elems[0] is compared, so this suppresses
+ // repeats of the first collected node only; attribute matches are
+ // compared by name and value since their nodes are synthesized
+ // fresh by getCurrentNode each time.
+ if len(elems) > 0 && (elems[0] == n || (nav.NodeType() == xpath.AttributeNode &&
+ nav.LocalName() == elems[0].Data && nav.Value() == InnerText(elems[0]))) {
+ continue
+ }
+ elems = append(elems, n)
+ }
+ return elems
+}
+
+// FindOne searches the html.Node that matches by the specified XPath expr,
+// and returns first element of matched html.Node.
+func FindOne(top *html.Node, expr string) *html.Node {
+ var elem *html.Node
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ if t.MoveNext() {
+ elem = getCurrentNode(t.Current().(*NodeNavigator))
+ }
+ return elem
+}
+
+// LoadURL loads the HTML document from the specified URL.
+func LoadURL(url string) (*html.Node, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, err
+ }
+ return html.Parse(r)
+}
+
+// getCurrentNode returns the html.Node the navigator currently points at.
+// An attribute position has no real *html.Node, so a detached synthetic
+// element is built instead: its Data is the attribute name and its single
+// text child holds the attribute value; it has no Parent.
+func getCurrentNode(n *NodeNavigator) *html.Node {
+ if n.NodeType() == xpath.AttributeNode {
+ childNode := &html.Node{
+ Type: html.TextNode,
+ Data: n.Value(),
+ }
+ return &html.Node{
+ Type: html.ElementNode,
+ Data: n.LocalName(),
+ FirstChild: childNode,
+ LastChild: childNode,
+ }
+
+ }
+ return n.curr
+}
+
+// Parse returns the parse tree for the HTML from the given Reader.
+func Parse(r io.Reader) (*html.Node, error) {
+ return html.Parse(r)
+}
+
+// InnerText returns the text between the start and end tags of the object.
+func InnerText(n *html.Node) string {
+ var output func(*bytes.Buffer, *html.Node)
+ output = func(buf *bytes.Buffer, n *html.Node) {
+ switch n.Type {
+ case html.TextNode:
+ buf.WriteString(n.Data)
+ return
+ case html.CommentNode:
+ return
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ output(buf, child)
+ }
+ }
+
+ var buf bytes.Buffer
+ output(&buf, n)
+ return buf.String()
+}
+
+// SelectAttr returns the attribute value with the specified name, or ""
+// if the attribute is absent or n is nil. If n is a parent-less element
+// whose tag equals the requested name — the synthetic node produced by
+// getCurrentNode for attribute matches — its inner text (the attribute
+// value) is returned directly.
+func SelectAttr(n *html.Node, name string) (val string) {
+ if n == nil {
+ return
+ }
+ if n.Type == html.ElementNode && n.Parent == nil && name == n.Data {
+ return InnerText(n)
+ }
+ for _, attr := range n.Attr {
+ if attr.Key == name {
+ val = attr.Val
+ break
+ }
+ }
+ return
+}
+
+// OutputHTML returns the text including tags name.
+func OutputHTML(n *html.Node, self bool) string {
+ var buf bytes.Buffer
+ if self {
+ html.Render(&buf, n)
+ } else {
+ for n := n.FirstChild; n != nil; n = n.NextSibling {
+ html.Render(&buf, n)
+ }
+ }
+ return buf.String()
+}
+
+type NodeNavigator struct {
+ root, curr *html.Node
+ attr int
+}
+
+func (h *NodeNavigator) Current() *html.Node {
+ return h.curr
+}
+
+func (h *NodeNavigator) NodeType() xpath.NodeType {
+ switch h.curr.Type {
+ case html.CommentNode:
+ return xpath.CommentNode
+ case html.TextNode:
+ return xpath.TextNode
+ case html.DocumentNode:
+ return xpath.RootNode
+ case html.ElementNode:
+ if h.attr != -1 {
+ return xpath.AttributeNode
+ }
+ return xpath.ElementNode
+ case html.DoctypeNode:
+ // The doctype declaration is ignored as such and treated as a root-node type.
+ return xpath.RootNode
+ }
+ panic(fmt.Sprintf("unknown HTML node type: %v", h.curr.Type))
+}
+
+func (h *NodeNavigator) LocalName() string {
+ if h.attr != -1 {
+ return h.curr.Attr[h.attr].Key
+ }
+ return h.curr.Data
+}
+
+func (*NodeNavigator) Prefix() string {
+ return ""
+}
+
+func (h *NodeNavigator) Value() string {
+ switch h.curr.Type {
+ case html.CommentNode:
+ return h.curr.Data
+ case html.ElementNode:
+ if h.attr != -1 {
+ return h.curr.Attr[h.attr].Val
+ }
+ return InnerText(h.curr)
+ case html.TextNode:
+ return h.curr.Data
+ }
+ return ""
+}
+
+func (h *NodeNavigator) Copy() xpath.NodeNavigator {
+ n := *h
+ return &n
+}
+
+func (h *NodeNavigator) MoveToRoot() {
+ h.curr = h.root
+}
+
+func (h *NodeNavigator) MoveToParent() bool {
+ if h.attr != -1 {
+ h.attr = -1
+ return true
+ } else if node := h.curr.Parent; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToNextAttribute() bool {
+ if h.attr >= len(h.curr.Attr)-1 {
+ return false
+ }
+ h.attr++
+ return true
+}
+
+func (h *NodeNavigator) MoveToChild() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.FirstChild; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToFirst() bool {
+ if h.attr != -1 || h.curr.PrevSibling == nil {
+ return false
+ }
+ for {
+ node := h.curr.PrevSibling
+ if node == nil {
+ break
+ }
+ h.curr = node
+ }
+ return true
+}
+
+func (h *NodeNavigator) String() string {
+ return h.Value()
+}
+
+func (h *NodeNavigator) MoveToNext() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.NextSibling; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToPrevious() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.PrevSibling; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
+ node, ok := other.(*NodeNavigator)
+ if !ok || node.root != h.root {
+ return false
+ }
+
+ h.curr = node.curr
+ h.attr = node.attr
+ return true
+}
diff --git a/vendor/github.com/antchfx/xmlquery/.gitignore b/vendor/github.com/antchfx/xmlquery/.gitignore
new file mode 100644
index 000000000..4d5d27b1d
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/.gitignore
@@ -0,0 +1,32 @@
+# vscode
+.vscode
+debug
+*.test
+
+./build
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/.travis.yml b/vendor/github.com/antchfx/xmlquery/.travis.yml
new file mode 100644
index 000000000..d9a7bb893
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+
+install:
+ - go get golang.org/x/net/html/charset
+ - go get github.com/antchfx/xpath
+ - go get github.com/mattn/goveralls
+
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/LICENSE b/vendor/github.com/antchfx/xmlquery/LICENSE
new file mode 100644
index 000000000..e14c37141
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/README.md b/vendor/github.com/antchfx/xmlquery/README.md
new file mode 100644
index 000000000..6683afd51
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/README.md
@@ -0,0 +1,186 @@
+xmlquery
+====
+[![Build Status](https://travis-ci.org/antchfx/xmlquery.svg?branch=master)](https://travis-ci.org/antchfx/xmlquery)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/xmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xmlquery?branch=master)
+[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
+
+Overview
+===
+
+xmlquery is an XPath query package for XML document, lets you extract data or evaluate from XML documents by an XPath expression.
+
+Change Logs
+===
+
+**2018-12-23**
+* added XML output will including comment node. [#9](https://github.com/antchfx/xmlquery/issues/9)
+
+**2018-12-03**
+ * added support attribute name with namespace prefix and XML output. [#6](https://github.com/antchfx/xmlquery/issues/6)
+
+Installation
+====
+
+> $ go get github.com/antchfx/xmlquery
+
+Getting Started
+===
+
+#### Parse a XML from URL.
+
+```go
+doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
+```
+
+#### Parse a XML from string.
+
+```go
+s := ``
+doc, err := xmlquery.Parse(strings.NewReader(s))
+```
+
+#### Parse a XML from io.Reader.
+
+```go
+f, err := os.Open("../books.xml")
+doc, err := xmlquery.Parse(f)
+```
+
+#### Find authors of all books in the bookstore.
+
+```go
+list := xmlquery.Find(doc, "//book//author")
+// or
+list := xmlquery.Find(doc, "//author")
+```
+
+#### Find the second book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[2]")
+```
+
+#### Find all book elements and only get `id` attribute self. (New Feature)
+
+```go
+list := xmlquery.Find(doc,"//book/@id")
+```
+
+#### Find all books with id is bk104.
+
+```go
+list := xmlquery.Find(doc, "//book[@id='bk104']")
+```
+
+#### Find all books that price less than 5.
+
+```go
+list := xmlquery.Find(doc, "//book[price<5]")
+```
+
+#### Evaluate the total price of all books.
+
+```go
+expr, err := xpath.Compile("sum(//book/price)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total price: %f\n", price)
+```
+
+#### Evaluate the number of all books element.
+
+```go
+expr, err := xpath.Compile("count(//book)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+```
+
+#### Create XML document.
+
+```go
+doc := &xmlquery.Node{
+ Type: xmlquery.DeclarationNode,
+ Data: "xml",
+ Attr: []xml.Attr{
+ xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
+ },
+}
+root := &xmlquery.Node{
+ Data: "rss",
+ Type: xmlquery.ElementNode,
+}
+doc.FirstChild = root
+channel := &xmlquery.Node{
+ Data: "channel",
+ Type: xmlquery.ElementNode,
+}
+root.FirstChild = channel
+title := &xmlquery.Node{
+ Data: "title",
+ Type: xmlquery.ElementNode,
+}
+title_text := &xmlquery.Node{
+ Data: "W3Schools Home Page",
+ Type: xmlquery.TextNode,
+}
+title.FirstChild = title_text
+channel.FirstChild = title
+fmt.Println(doc.OutputXML(true))
+// W3Schools Home Page
+```
+
+Quick Tutorial
+===
+
+```go
+import (
+ "github.com/antchfx/xmlquery"
+)
+
+func main(){
+ s := `
+
+
+ W3Schools Home Page
+ https://www.w3schools.com
+ Free web building tutorials
+
+ RSS Tutorial
+ https://www.w3schools.com/xml/xml_rss.asp
+ New RSS tutorial on W3Schools
+
+
+ XML Tutorial
+ https://www.w3schools.com/xml
+ New XML tutorial on W3Schools
+
+
+`
+
+ doc, err := xmlquery.Parse(strings.NewReader(s))
+ if err != nil {
+ panic(err)
+ }
+ channel := xmlquery.FindOne(doc, "//channel")
+ if n := channel.SelectElement("title"); n != nil {
+ fmt.Printf("title: %s\n", n.InnerText())
+ }
+ if n := channel.SelectElement("link"); n != nil {
+ fmt.Printf("link: %s\n", n.InnerText())
+ }
+ for i, n := range xmlquery.Find(doc, "//item/title") {
+ fmt.Printf("#%d %s\n", i, n.InnerText())
+ }
+}
+```
+
+List of supported XPath query packages
+===
+|Name |Description |
+|--------------------------|----------------|
+|[htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document|
+|[xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document|
+|[jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document|
+
+ Questions
+===
+Please let me know if you have any questions
diff --git a/vendor/github.com/antchfx/xmlquery/books.xml b/vendor/github.com/antchfx/xmlquery/books.xml
new file mode 100644
index 000000000..85a74b588
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/books.xml
@@ -0,0 +1,121 @@
+
+
+
+
+ Gambardella, Matthew
+ XML Developer's Guide
+ Computer
+ 44.95
+ 2000-10-01
+ An in-depth look at creating applications
+ with XML.
+
+
+ Ralls, Kim
+ Midnight Rain
+ Fantasy
+ 5.95
+ 2000-12-16
+ A former architect battles corporate zombies,
+ an evil sorceress, and her own childhood to become queen
+ of the world.
+
+
+ Corets, Eva
+ Maeve Ascendant
+ Fantasy
+ 5.95
+ 2000-11-17
+ After the collapse of a nanotechnology
+ society in England, the young survivors lay the
+ foundation for a new society.
+
+
+ Corets, Eva
+ Oberon's Legacy
+ Fantasy
+ 5.95
+ 2001-03-10
+ In post-apocalypse England, the mysterious
+ agent known only as Oberon helps to create a new life
+ for the inhabitants of London. Sequel to Maeve
+ Ascendant.
+
+
+ Corets, Eva
+ The Sundered Grail
+ Fantasy
+ 5.95
+ 2001-09-10
+ The two daughters of Maeve, half-sisters,
+ battle one another for control of England. Sequel to
+ Oberon's Legacy.
+
+
+ Randall, Cynthia
+ Lover Birds
+ Romance
+ 4.95
+ 2000-09-02
+ When Carla meets Paul at an ornithology
+ conference, tempers fly as feathers get ruffled.
+
+
+ Thurman, Paula
+ Splish Splash
+ Romance
+ 4.95
+ 2000-11-02
+ A deep sea diver finds true love twenty
+ thousand leagues beneath the sea.
+
+
+ Knorr, Stefan
+ Creepy Crawlies
+ Horror
+ 4.95
+ 2000-12-06
+ An anthology of horror stories about roaches,
+ centipedes, scorpions and other insects.
+
+
+ Kress, Peter
+ Paradox Lost
+ Science Fiction
+ 6.95
+ 2000-11-02
+ After an inadvertant trip through a Heisenberg
+ Uncertainty Device, James Salway discovers the problems
+ of being quantum.
+
+
+ O'Brien, Tim
+ Microsoft .NET: The Programming Bible
+ Computer
+ 36.95
+ 2000-12-09
+ Microsoft's .NET initiative is explored in
+ detail in this deep programmer's reference.
+
+
+ O'Brien, Tim
+ MSXML3: A Comprehensive Guide
+ Computer
+ 36.95
+ 2000-12-01
+ The Microsoft MSXML3 parser is covered in
+ detail, with attention to XML DOM interfaces, XSLT processing,
+ SAX and more.
+
+
+ Galos, Mike
+ Visual Studio 7: A Comprehensive Guide
+ Computer
+ 49.95
+ 2001-04-16
+ Microsoft Visual Studio 7 is explored in depth,
+ looking at how Visual Basic, Visual C++, C#, and ASP+ are
+ integrated into a comprehensive development
+ environment.
+
+
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/node.go b/vendor/github.com/antchfx/xmlquery/node.go
new file mode 100644
index 000000000..d0e6a5427
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/node.go
@@ -0,0 +1,302 @@
+package xmlquery
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "golang.org/x/net/html/charset"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint
+
+const (
+ // DocumentNode is a document object that, as the root of the document tree,
+ // provides access to the entire XML document.
+ DocumentNode NodeType = iota
+ // DeclarationNode is the document type declaration, indicated by the following
+ // tag (for example, ).
+ DeclarationNode
+ // ElementNode is an element (for example, ).
+ ElementNode
+ // TextNode is the text content of a node.
+ TextNode
+ // CommentNode a comment (for example, ).
+ CommentNode
+ // AttributeNode is an attribute of element.
+ AttributeNode
+)
+
+// A Node consists of a NodeType and some Data (tag name for
+// element nodes, content for text) and are part of a tree of Nodes.
+type Node struct {
+ Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+ Type NodeType
+ Data string
+ Prefix string
+ NamespaceURI string
+ Attr []xml.Attr
+
+ level int // node level in the tree
+}
+
+// InnerText returns the text between the start and end tags of the object.
+func (n *Node) InnerText() string {
+ var output func(*bytes.Buffer, *Node)
+ output = func(buf *bytes.Buffer, n *Node) {
+ switch n.Type {
+ case TextNode:
+ buf.WriteString(n.Data)
+ return
+ case CommentNode:
+ return
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ output(buf, child)
+ }
+ }
+
+ var buf bytes.Buffer
+ output(&buf, n)
+ return buf.String()
+}
+
+func outputXML(buf *bytes.Buffer, n *Node) {
+ if n.Type == TextNode {
+ xml.EscapeText(buf, []byte(strings.TrimSpace(n.Data)))
+ return
+ }
+ if n.Type == CommentNode {
+ buf.WriteString("")
+ return
+ }
+ if n.Type == DeclarationNode {
+ buf.WriteString("" + n.Data)
+ } else {
+ if n.Prefix == "" {
+ buf.WriteString("<" + n.Data)
+ } else {
+ buf.WriteString("<" + n.Prefix + ":" + n.Data)
+ }
+ }
+
+ for _, attr := range n.Attr {
+ if attr.Name.Space != "" {
+ buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
+ } else {
+ buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
+ }
+ }
+ if n.Type == DeclarationNode {
+ buf.WriteString("?>")
+ } else {
+ buf.WriteString(">")
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ outputXML(buf, child)
+ }
+ if n.Type != DeclarationNode {
+ if n.Prefix == "" {
+ buf.WriteString(fmt.Sprintf("%s>", n.Data))
+ } else {
+ buf.WriteString(fmt.Sprintf("%s:%s>", n.Prefix, n.Data))
+ }
+ }
+}
+
+// OutputXML returns the text that including tags name.
+func (n *Node) OutputXML(self bool) string {
+ var buf bytes.Buffer
+ if self {
+ outputXML(&buf, n)
+ } else {
+ for n := n.FirstChild; n != nil; n = n.NextSibling {
+ outputXML(&buf, n)
+ }
+ }
+
+ return buf.String()
+}
+
+func addAttr(n *Node, key, val string) {
+ var attr xml.Attr
+ if i := strings.Index(key, ":"); i > 0 {
+ attr = xml.Attr{
+ Name: xml.Name{Space: key[:i], Local: key[i+1:]},
+ Value: val,
+ }
+ } else {
+ attr = xml.Attr{
+ Name: xml.Name{Local: key},
+ Value: val,
+ }
+ }
+
+ n.Attr = append(n.Attr, attr)
+}
+
+func addChild(parent, n *Node) {
+ n.Parent = parent
+ if parent.FirstChild == nil {
+ parent.FirstChild = n
+ } else {
+ parent.LastChild.NextSibling = n
+ n.PrevSibling = parent.LastChild
+ }
+
+ parent.LastChild = n
+}
+
+func addSibling(sibling, n *Node) {
+ for t := sibling.NextSibling; t != nil; t = t.NextSibling {
+ sibling = t
+ }
+ n.Parent = sibling.Parent
+ sibling.NextSibling = n
+ n.PrevSibling = sibling
+ if sibling.Parent != nil {
+ sibling.Parent.LastChild = n
+ }
+}
+
+// LoadURL loads the XML document from the specified URL.
+func LoadURL(url string) (*Node, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ return parse(resp.Body)
+}
+
+func parse(r io.Reader) (*Node, error) {
+ var (
+ decoder = xml.NewDecoder(r)
+ doc = &Node{Type: DocumentNode}
+ space2prefix = make(map[string]string)
+ level = 0
+ )
+ // http://www.w3.org/XML/1998/namespace is bound by definition to the prefix xml.
+ space2prefix["http://www.w3.org/XML/1998/namespace"] = "xml"
+ decoder.CharsetReader = charset.NewReaderLabel
+ prev := doc
+ for {
+ tok, err := decoder.Token()
+ switch {
+ case err == io.EOF:
+ goto quit
+ case err != nil:
+ return nil, err
+ }
+
+ switch tok := tok.(type) {
+ case xml.StartElement:
+ if level == 0 {
+ // mising XML declaration
+ node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
+ addChild(prev, node)
+ level = 1
+ prev = node
+ }
+ // https://www.w3.org/TR/xml-names/#scoping-defaulting
+ for _, att := range tok.Attr {
+ if att.Name.Local == "xmlns" {
+ space2prefix[att.Value] = ""
+ } else if att.Name.Space == "xmlns" {
+ space2prefix[att.Value] = att.Name.Local
+ }
+ }
+
+ if tok.Name.Space != "" {
+ if _, found := space2prefix[tok.Name.Space]; !found {
+ return nil, errors.New("xmlquery: invalid XML document, namespace is missing")
+ }
+ }
+
+ for i := 0; i < len(tok.Attr); i++ {
+ att := &tok.Attr[i]
+ if prefix, ok := space2prefix[att.Name.Space]; ok {
+ att.Name.Space = prefix
+ }
+ }
+
+ node := &Node{
+ Type: ElementNode,
+ Data: tok.Name.Local,
+ Prefix: space2prefix[tok.Name.Space],
+ NamespaceURI: tok.Name.Space,
+ Attr: tok.Attr,
+ level: level,
+ }
+ //fmt.Println(fmt.Sprintf("start > %s : %d", node.Data, level))
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ } else if level < prev.level {
+ for i := prev.level - level; i > 1; i-- {
+ prev = prev.Parent
+ }
+ addSibling(prev.Parent, node)
+ }
+ prev = node
+ level++
+ case xml.EndElement:
+ level--
+ case xml.CharData:
+ node := &Node{Type: TextNode, Data: string(tok), level: level}
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ }
+ case xml.Comment:
+ node := &Node{Type: CommentNode, Data: string(tok), level: level}
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ } else if level < prev.level {
+ for i := prev.level - level; i > 1; i-- {
+ prev = prev.Parent
+ }
+ addSibling(prev.Parent, node)
+ }
+ case xml.ProcInst: // Processing Instruction
+ if prev.Type != DeclarationNode {
+ level++
+ }
+ node := &Node{Type: DeclarationNode, Data: tok.Target, level: level}
+ pairs := strings.Split(string(tok.Inst), " ")
+ for _, pair := range pairs {
+ pair = strings.TrimSpace(pair)
+ if i := strings.Index(pair, "="); i > 0 {
+ addAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
+ }
+ }
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ }
+ prev = node
+ case xml.Directive:
+ }
+
+ }
+quit:
+ return doc, nil
+}
+
+// Parse returns the parse tree for the XML from the given Reader.
+func Parse(r io.Reader) (*Node, error) {
+ return parse(r)
+}
diff --git a/vendor/github.com/antchfx/xmlquery/query.go b/vendor/github.com/antchfx/xmlquery/query.go
new file mode 100644
index 000000000..e3a0db728
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/query.go
@@ -0,0 +1,264 @@
+/*
+Package xmlquery provides extract data from XML documents using XPath expression.
+*/
+package xmlquery
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/antchfx/xpath"
+)
+
+// SelectElements finds child elements with the specified name.
+func (n *Node) SelectElements(name string) []*Node {
+ return Find(n, name)
+}
+
+// SelectElement finds child elements with the specified name.
+func (n *Node) SelectElement(name string) *Node {
+ return FindOne(n, name)
+}
+
+// SelectAttr returns the attribute value with the specified name.
+func (n *Node) SelectAttr(name string) string {
+ if n.Type == AttributeNode {
+ if n.Data == name {
+ return n.InnerText()
+ }
+ return ""
+ }
+ var local, space string
+ local = name
+ if i := strings.Index(name, ":"); i > 0 {
+ space = name[:i]
+ local = name[i+1:]
+ }
+ for _, attr := range n.Attr {
+ if attr.Name.Local == local && attr.Name.Space == space {
+ return attr.Value
+ }
+ }
+ return ""
+}
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
+func CreateXPathNavigator(top *Node) *NodeNavigator {
+ return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+func getCurrentNode(it *xpath.NodeIterator) *Node {
+ n := it.Current().(*NodeNavigator)
+ if n.NodeType() == xpath.AttributeNode {
+ childNode := &Node{
+ Type: TextNode,
+ Data: n.Value(),
+ }
+ return &Node{
+ Type: AttributeNode,
+ Data: n.LocalName(),
+ FirstChild: childNode,
+ LastChild: childNode,
+ }
+ }
+ return n.curr
+}
+
+// Find searches the Node that matches by the specified XPath expr.
+func Find(top *Node, expr string) []*Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ var elems []*Node
+ for t.MoveNext() {
+ elems = append(elems, getCurrentNode(t))
+ }
+ return elems
+}
+
+// FindOne searches the Node that matches by the specified XPath expr,
+// and returns first element of matched.
+func FindOne(top *Node, expr string) *Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ var elem *Node
+ if t.MoveNext() {
+ elem = getCurrentNode(t)
+ }
+ return elem
+}
+
+// FindEach searches the html.Node and calls functions cb.
+// Important: this method has deprecated, recommend use for .. = range Find(){}.
+func FindEach(top *Node, expr string, cb func(int, *Node)) {
+ for i, n := range Find(top, expr) {
+ cb(i, n)
+ }
+}
+
+// FindEachWithBreak functions the same as FindEach but allows you
+// to break the loop by returning false from your callback function, cb.
+// Important: this method has deprecated, recommend use for .. = range Find(){}.
+func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) {
+ for i, n := range Find(top, expr) {
+ if !cb(i, n) {
+ break
+ }
+ }
+}
+
+type NodeNavigator struct {
+ root, curr *Node
+ attr int
+}
+
+func (x *NodeNavigator) Current() *Node {
+ return x.curr
+}
+
+func (x *NodeNavigator) NodeType() xpath.NodeType {
+ switch x.curr.Type {
+ case CommentNode:
+ return xpath.CommentNode
+ case TextNode:
+ return xpath.TextNode
+ case DeclarationNode, DocumentNode:
+ return xpath.RootNode
+ case ElementNode:
+ if x.attr != -1 {
+ return xpath.AttributeNode
+ }
+ return xpath.ElementNode
+ }
+ panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type))
+}
+
+func (x *NodeNavigator) LocalName() string {
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Name.Local
+ }
+ return x.curr.Data
+
+}
+
+func (x *NodeNavigator) Prefix() string {
+ if x.NodeType() == xpath.AttributeNode {
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Name.Space
+ }
+ return ""
+ }
+ return x.curr.Prefix
+}
+
+func (x *NodeNavigator) Value() string {
+ switch x.curr.Type {
+ case CommentNode:
+ return x.curr.Data
+ case ElementNode:
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Value
+ }
+ return x.curr.InnerText()
+ case TextNode:
+ return x.curr.Data
+ }
+ return ""
+}
+
+func (x *NodeNavigator) Copy() xpath.NodeNavigator {
+ n := *x
+ return &n
+}
+
+func (x *NodeNavigator) MoveToRoot() {
+ x.curr = x.root
+}
+
+func (x *NodeNavigator) MoveToParent() bool {
+ if x.attr != -1 {
+ x.attr = -1
+ return true
+ } else if node := x.curr.Parent; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToNextAttribute() bool {
+ if x.attr >= len(x.curr.Attr)-1 {
+ return false
+ }
+ x.attr++
+ return true
+}
+
+func (x *NodeNavigator) MoveToChild() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.FirstChild; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToFirst() bool {
+ if x.attr != -1 || x.curr.PrevSibling == nil {
+ return false
+ }
+ for {
+ node := x.curr.PrevSibling
+ if node == nil {
+ break
+ }
+ x.curr = node
+ }
+ return true
+}
+
+func (x *NodeNavigator) String() string {
+ return x.Value()
+}
+
+func (x *NodeNavigator) MoveToNext() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.NextSibling; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToPrevious() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.PrevSibling; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
+ node, ok := other.(*NodeNavigator)
+ if !ok || node.root != x.root {
+ return false
+ }
+
+ x.curr = node.curr
+ x.attr = node.attr
+ return true
+}
diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml
new file mode 100644
index 000000000..3489e3871
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go: 1.x
+
diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md
new file mode 100644
index 000000000..aa4a536ca
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md
new file mode 100644
index 000000000..105a6ae33
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/README.md
@@ -0,0 +1,58 @@
+# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase)
+
+CamelCase is a Golang (Go) package to split the words of a camelcase type
+string into a slice of words. It can be used to convert a camelcase word (lower
+or upper case) into any type of word.
+
+## Splitting rules:
+
+1. If string is not valid UTF-8, return it without splitting as
+ single item array.
+2. Assign all unicode characters into one of 4 sets: lower case
+ letters, upper case letters, numbers, and all other characters.
+3. Iterate through characters of string, introducing splits
+ between adjacent characters that belong to different sets.
+4. Iterate through array of split strings, and if a given string
+ is upper case:
+ * if subsequent string is lower case:
+ * move last character of upper case string to beginning of
+ lower case string
+
+## Install
+
+```bash
+go get github.com/fatih/camelcase
+```
+
+## Usage and examples
+
+```go
+splitted := camelcase.Split("GolangPackage")
+
+fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package"
+```
+
+Both lower camel case and upper camel case are supported. For more info please
+check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase)
+
+Below are some example cases:
+
+```
+"" => []
+"lowercase" => ["lowercase"]
+"Class" => ["Class"]
+"MyClass" => ["My", "Class"]
+"MyC" => ["My", "C"]
+"HTML" => ["HTML"]
+"PDFLoader" => ["PDF", "Loader"]
+"AString" => ["A", "String"]
+"SimpleXMLParser" => ["Simple", "XML", "Parser"]
+"vimRPCPlugin" => ["vim", "RPC", "Plugin"]
+"GL11Version" => ["GL", "11", "Version"]
+"99Bottles" => ["99", "Bottles"]
+"May5" => ["May", "5"]
+"BFG9000" => ["BFG", "9000"]
+"BöseÜberraschung" => ["Böse", "Überraschung"]
+"Two spaces" => ["Two", " ", "spaces"]
+"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
+```
diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go
new file mode 100644
index 000000000..02160c9a4
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/camelcase.go
@@ -0,0 +1,90 @@
+// Package camelcase is a micro package to split the words of a camelcase type
+// string into a slice of words.
+package camelcase
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+// Split splits the camelcase word and returns a list of words. It also
+// supports digits. Both lower camel case and upper camel case are supported.
+// For more info please check: http://en.wikipedia.org/wiki/CamelCase
+//
+// Examples
+//
+// "" => [""]
+// "lowercase" => ["lowercase"]
+// "Class" => ["Class"]
+// "MyClass" => ["My", "Class"]
+// "MyC" => ["My", "C"]
+// "HTML" => ["HTML"]
+// "PDFLoader" => ["PDF", "Loader"]
+// "AString" => ["A", "String"]
+// "SimpleXMLParser" => ["Simple", "XML", "Parser"]
+// "vimRPCPlugin" => ["vim", "RPC", "Plugin"]
+// "GL11Version" => ["GL", "11", "Version"]
+// "99Bottles" => ["99", "Bottles"]
+// "May5" => ["May", "5"]
+// "BFG9000" => ["BFG", "9000"]
+// "BöseÜberraschung" => ["Böse", "Überraschung"]
+// "Two spaces" => ["Two", " ", "spaces"]
+// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
+//
+// Splitting rules
+//
+// 1) If string is not valid UTF-8, return it without splitting as
+// single item array.
+// 2) Assign all unicode characters into one of 4 sets: lower case
+// letters, upper case letters, numbers, and all other characters.
+// 3) Iterate through characters of string, introducing splits
+// between adjacent characters that belong to different sets.
+// 4) Iterate through array of split strings, and if a given string
+// is upper case:
+// if subsequent string is lower case:
+// move last character of upper case string to beginning of
+// lower case string
+func Split(src string) (entries []string) {
+ // don't split invalid utf8
+ if !utf8.ValidString(src) {
+ return []string{src}
+ }
+ entries = []string{}
+ var runes [][]rune
+ lastClass := 0
+ class := 0
+ // split into fields based on class of unicode character
+ for _, r := range src {
+ switch true {
+ case unicode.IsLower(r):
+ class = 1
+ case unicode.IsUpper(r):
+ class = 2
+ case unicode.IsDigit(r):
+ class = 3
+ default:
+ class = 4
+ }
+ if class == lastClass {
+ runes[len(runes)-1] = append(runes[len(runes)-1], r)
+ } else {
+ runes = append(runes, []rune{r})
+ }
+ lastClass = class
+ }
+ // handle upper case -> lower case sequences, e.g.
+ // "PDFL", "oader" -> "PDF", "Loader"
+ for i := 0; i < len(runes)-1; i++ {
+ if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) {
+ runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...)
+ runes[i] = runes[i][:len(runes[i])-1]
+ }
+ }
+ // construct []string from results
+ for _, s := range runes {
+ if len(s) > 0 {
+ entries = append(entries, string(s))
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/fatih/structtag/.travis.yml b/vendor/github.com/fatih/structtag/.travis.yml
new file mode 100644
index 000000000..a668283da
--- /dev/null
+++ b/vendor/github.com/fatih/structtag/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+go:
+ - 1.7.x
+ - tip
diff --git a/vendor/github.com/fatih/structtag/LICENSE b/vendor/github.com/fatih/structtag/LICENSE
new file mode 100644
index 000000000..4fd15f9f8
--- /dev/null
+++ b/vendor/github.com/fatih/structtag/LICENSE
@@ -0,0 +1,60 @@
+Copyright (c) 2017, Fatih Arslan
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of structtag nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This software includes some portions from Go. Go is used under the terms of the
+BSD like license.
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The Go gopher was designed by Renee French. http://reneefrench.blogspot.com/ The design is licensed under the Creative Commons 3.0 Attributions license. Read this article for more details: https://blog.golang.org/gopher
diff --git a/vendor/github.com/fatih/structtag/README.md b/vendor/github.com/fatih/structtag/README.md
new file mode 100644
index 000000000..bc11a8b99
--- /dev/null
+++ b/vendor/github.com/fatih/structtag/README.md
@@ -0,0 +1,73 @@
+# structtag [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structtag) [![Build Status](https://travis-ci.org/fatih/structtag.svg?branch=master)](https://travis-ci.org/fatih/structtag)
+
+structtag provides an easy way of parsing and manipulating struct tag fields.
+Please vendor the library as it might change in future versions.
+
+# Install
+
+```bash
+go get github.com/fatih/structtag
+```
+
+# Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "github.com/fatih/structtag"
+)
+
+func main() {
+ type t struct {
+ t string `json:"foo,omitempty,string" xml:"foo"`
+ }
+
+ // get field tag
+ tag := reflect.TypeOf(t{}).Field(0).Tag
+
+ // ... and start using structtag by parsing the tag
+ tags, err := structtag.Parse(string(tag))
+ if err != nil {
+ panic(err)
+ }
+
+ // iterate over all tags
+ for _, t := range tags.Tags() {
+ fmt.Printf("tag: %+v\n", t)
+ }
+
+ // get a single tag
+ jsonTag, err := tags.Get("json")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(jsonTag) // Output: json:"foo,omitempty,string"
+ fmt.Println(jsonTag.Key) // Output: json
+ fmt.Println(jsonTag.Name) // Output: foo
+ fmt.Println(jsonTag.Options) // Output: [omitempty string]
+
+ // change existing tag
+ jsonTag.Name = "foo_bar"
+ jsonTag.Options = nil
+ tags.Set(jsonTag)
+
+ // add new tag
+ tags.Set(&structtag.Tag{
+ Key: "hcl",
+ Name: "foo",
+ Options: []string{"squash"},
+ })
+
+ // print the tags
+ fmt.Println(tags) // Output: json:"foo_bar" xml:"foo" hcl:"foo,squash"
+
+ // sort tags according to keys
+ sort.Sort(tags)
+ fmt.Println(tags) // Output: hcl:"foo,squash" json:"foo_bar" xml:"foo"
+}
+```
diff --git a/vendor/github.com/fatih/structtag/tags.go b/vendor/github.com/fatih/structtag/tags.go
new file mode 100644
index 000000000..be28a9880
--- /dev/null
+++ b/vendor/github.com/fatih/structtag/tags.go
@@ -0,0 +1,309 @@
+package structtag
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ errTagSyntax = errors.New("bad syntax for struct tag pair")
+ errTagKeySyntax = errors.New("bad syntax for struct tag key")
+ errTagValueSyntax = errors.New("bad syntax for struct tag value")
+
+ errKeyNotSet = errors.New("tag key does not exist")
+ errTagNotExist = errors.New("tag does not exist")
+ errTagKeyMismatch = errors.New("mismatch between key and tag.key")
+)
+
+// Tags represent a set of tags from a single struct field
+type Tags struct {
+ tags []*Tag
+}
+
+// Tag defines a single struct's string literal tag
+type Tag struct {
+ // Key is the tag key, such as json, xml, etc..
+ // i.e: `json:"foo,omitempty". Here key is: "json"
+ Key string
+
+ // Name is a part of the value
+ // i.e: `json:"foo,omitempty". Here name is: "foo"
+ Name string
+
+ // Options is a part of the value. It contains a slice of tag options i.e:
+ // `json:"foo,omitempty". Here options is: ["omitempty"]
+ Options []string
+}
+
+// Parse parses a single struct field tag and returns the set of tags.
+func Parse(tag string) (*Tags, error) {
+ var tags []*Tag
+
+ // NOTE(arslan) following code is from reflect and vet package with some
+ // modifications to collect all necessary information and extend it with
+ // usable methods
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ return nil, nil
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax
+ // error. Strictly speaking, control chars include the range [0x7f,
+ // 0x9f], not just [0x00, 0x1f], but in practice, we ignore the
+ // multi-byte control characters as it is simpler to inspect the tag's
+ // bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+
+ if i == 0 {
+ return nil, errTagKeySyntax
+ }
+ if i+1 >= len(tag) || tag[i] != ':' {
+ return nil, errTagSyntax
+ }
+ if tag[i+1] != '"' {
+ return nil, errTagValueSyntax
+ }
+
+ key := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ return nil, errTagValueSyntax
+ }
+
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ return nil, errTagValueSyntax
+ }
+
+ res := strings.Split(value, ",")
+ name := res[0]
+ options := res[1:]
+ if len(options) == 0 {
+ options = nil
+ }
+
+ tags = append(tags, &Tag{
+ Key: key,
+ Name: name,
+ Options: options,
+ })
+ }
+
+ return &Tags{
+ tags: tags,
+ }, nil
+}
+
+// Get returns the tag associated with the given key. If the key is present
+// in the tag the corresponding *Tag is returned. Otherwise the returned tag
+// is nil and the error is errTagNotExist, reporting that no tag with the
+// given key exists.
+func (t *Tags) Get(key string) (*Tag, error) {
+ for _, tag := range t.tags {
+ if tag.Key == key {
+ return tag, nil
+ }
+ }
+
+ return nil, errTagNotExist
+}
+
+// Set sets the given tag. If the tag key already exists it'll override it
+func (t *Tags) Set(tag *Tag) error {
+ if tag.Key == "" {
+ return errKeyNotSet
+ }
+
+ added := false
+ for i, tg := range t.tags {
+ if tg.Key == tag.Key {
+ added = true
+ t.tags[i] = tag
+ }
+ }
+
+ if !added {
+ // this means this is a new tag, add it
+ t.tags = append(t.tags, tag)
+ }
+
+ return nil
+}
+
+// AddOptions adds the given option for the given key. If the option already
+// exists it doesn't add it again.
+func (t *Tags) AddOptions(key string, options ...string) {
+ for i, tag := range t.tags {
+ if tag.Key != key {
+ continue
+ }
+
+ for _, opt := range options {
+ if !tag.HasOption(opt) {
+ tag.Options = append(tag.Options, opt)
+ }
+ }
+
+ t.tags[i] = tag
+ }
+}
+
+// DeleteOptions deletes the given options for the given key
+func (t *Tags) DeleteOptions(key string, options ...string) {
+ hasOption := func(option string) bool {
+ for _, opt := range options {
+ if opt == option {
+ return true
+ }
+ }
+ return false
+ }
+
+ for i, tag := range t.tags {
+ if tag.Key != key {
+ continue
+ }
+
+ var updated []string
+ for _, opt := range tag.Options {
+ if !hasOption(opt) {
+ updated = append(updated, opt)
+ }
+ }
+
+ tag.Options = updated
+ t.tags[i] = tag
+ }
+}
+
+// Delete deletes the tag for the given keys
+func (t *Tags) Delete(keys ...string) {
+ hasKey := func(key string) bool {
+ for _, k := range keys {
+ if k == key {
+ return true
+ }
+ }
+ return false
+ }
+
+ var updated []*Tag
+ for _, tag := range t.tags {
+ if !hasKey(tag.Key) {
+ updated = append(updated, tag)
+ }
+ }
+
+ t.tags = updated
+}
+
+// Tags returns a slice of tags. The order is the original tag order unless it
+// was changed.
+func (t *Tags) Tags() []*Tag {
+ return t.tags
+}
+
+// Keys returns a slice of tag keys. The order is the original tag order
+// unless it was changed.
+func (t *Tags) Keys() []string {
+ var keys []string
+ for _, tag := range t.tags {
+ keys = append(keys, tag.Key)
+ }
+ return keys
+}
+
+// String reassembles the tags into a valid literal tag field representation
+func (t *Tags) String() string {
+ tags := t.Tags()
+ if len(tags) == 0 {
+ return ""
+ }
+
+ var buf bytes.Buffer
+ for i, tag := range t.Tags() {
+ buf.WriteString(tag.String())
+ if i != len(tags)-1 {
+ buf.WriteString(" ")
+ }
+ }
+ return buf.String()
+}
+
+// HasOption returns true if the given option is available in options
+func (t *Tag) HasOption(opt string) bool {
+ for _, tagOpt := range t.Options {
+ if tagOpt == opt {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Value returns the raw value of the tag, i.e. if the tag is
+// `json:"foo,omitempty", the Value is "foo,omitempty"
+func (t *Tag) Value() string {
+ options := strings.Join(t.Options, ",")
+ if options != "" {
+ return fmt.Sprintf(`%s,%s`, t.Name, options)
+ }
+ return t.Name
+}
+
+// String reassembles the tag into a valid tag field representation
+func (t *Tag) String() string {
+ return fmt.Sprintf(`%s:"%s"`, t.Key, t.Value())
+}
+
+// GoString implements the fmt.GoStringer interface
+func (t *Tag) GoString() string {
+ template := `{
+ Key: '%s',
+ Name: '%s',
+ Option: '%s',
+ }`
+
+ if t.Options == nil {
+ return fmt.Sprintf(template, t.Key, t.Name, "nil")
+ }
+
+ options := strings.Join(t.Options, ",")
+ return fmt.Sprintf(template, t.Key, t.Name, options)
+}
+
+func (t *Tags) Len() int {
+ return len(t.tags)
+}
+
+func (t *Tags) Less(i int, j int) bool {
+ return t.tags[i].Key < t.tags[j].Key
+}
+
+func (t *Tags) Swap(i int, j int) {
+ t.tags[i], t.tags[j] = t.tags[j], t.tags[i]
+}
diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore
new file mode 100644
index 000000000..b4ae623be
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.gitignore
@@ -0,0 +1,8 @@
+glob.iml
+.idea
+*.cpu
+*.mem
+*.test
+*.dot
+*.png
+*.svg
diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml
new file mode 100644
index 000000000..e8a276826
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.travis.yml
@@ -0,0 +1,9 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5.3
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE
new file mode 100644
index 000000000..9d4735cad
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh
new file mode 100644
index 000000000..804cf22e6
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/bench.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+
+bench() {
+ filename="/tmp/$1-$2.bench"
+ if test -e "${filename}";
+ then
+ echo "Already exists ${filename}"
+ else
+ backup=`git rev-parse --abbrev-ref HEAD`
+ git checkout $1
+ echo -n "Creating ${filename}... "
+ go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
+ echo "OK"
+ git checkout ${backup}
+ sleep 5
+ fi
+}
+
+
+to=$1
+current=`git rev-parse --abbrev-ref HEAD`
+
+bench ${to} $2
+bench ${current} $2
+
+benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"
diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go
new file mode 100644
index 000000000..02e7de80a
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/compiler/compiler.go
@@ -0,0 +1,525 @@
+package compiler
+
+// TODO use constructor with all matchers, and to their structs private
+// TODO glue multiple Text nodes (like after QuoteMeta)
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/gobwas/glob/match"
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/util/runes"
+)
+
+func optimizeMatcher(matcher match.Matcher) match.Matcher {
+ switch m := matcher.(type) {
+
+ case match.Any:
+ if len(m.Separators) == 0 {
+ return match.NewSuper()
+ }
+
+ case match.AnyOf:
+ if len(m.Matchers) == 1 {
+ return m.Matchers[0]
+ }
+
+ return m
+
+ case match.List:
+ if m.Not == false && len(m.List) == 1 {
+ return match.NewText(string(m.List))
+ }
+
+ return m
+
+ case match.BTree:
+ m.Left = optimizeMatcher(m.Left)
+ m.Right = optimizeMatcher(m.Right)
+
+ r, ok := m.Value.(match.Text)
+ if !ok {
+ return m
+ }
+
+ var (
+ leftNil = m.Left == nil
+ rightNil = m.Right == nil
+ )
+ if leftNil && rightNil {
+ return match.NewText(r.Str)
+ }
+
+ _, leftSuper := m.Left.(match.Super)
+ lp, leftPrefix := m.Left.(match.Prefix)
+ la, leftAny := m.Left.(match.Any)
+
+ _, rightSuper := m.Right.(match.Super)
+ rs, rightSuffix := m.Right.(match.Suffix)
+ ra, rightAny := m.Right.(match.Any)
+
+ switch {
+ case leftSuper && rightSuper:
+ return match.NewContains(r.Str, false)
+
+ case leftSuper && rightNil:
+ return match.NewSuffix(r.Str)
+
+ case rightSuper && leftNil:
+ return match.NewPrefix(r.Str)
+
+ case leftNil && rightSuffix:
+ return match.NewPrefixSuffix(r.Str, rs.Suffix)
+
+ case rightNil && leftPrefix:
+ return match.NewPrefixSuffix(lp.Prefix, r.Str)
+
+ case rightNil && leftAny:
+ return match.NewSuffixAny(r.Str, la.Separators)
+
+ case leftNil && rightAny:
+ return match.NewPrefixAny(r.Str, ra.Separators)
+ }
+
+ return m
+ }
+
+ return matcher
+}
+
+func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
+ if len(matchers) == 0 {
+ return nil, fmt.Errorf("compile error: need at least one matcher")
+ }
+ if len(matchers) == 1 {
+ return matchers[0], nil
+ }
+ if m := glueMatchers(matchers); m != nil {
+ return m, nil
+ }
+
+ idx := -1
+ maxLen := -1
+ var val match.Matcher
+ for i, matcher := range matchers {
+ if l := matcher.Len(); l != -1 && l >= maxLen {
+ maxLen = l
+ idx = i
+ val = matcher
+ }
+ }
+
+ if val == nil { // not found matcher with static length
+ r, err := compileMatchers(matchers[1:])
+ if err != nil {
+ return nil, err
+ }
+ return match.NewBTree(matchers[0], nil, r), nil
+ }
+
+ left := matchers[:idx]
+ var right []match.Matcher
+ if len(matchers) > idx+1 {
+ right = matchers[idx+1:]
+ }
+
+ var l, r match.Matcher
+ var err error
+ if len(left) > 0 {
+ l, err = compileMatchers(left)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(right) > 0 {
+ r, err = compileMatchers(right)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return match.NewBTree(val, l, r), nil
+}
+
+func glueMatchers(matchers []match.Matcher) match.Matcher {
+ if m := glueMatchersAsEvery(matchers); m != nil {
+ return m
+ }
+ if m := glueMatchersAsRow(matchers); m != nil {
+ return m
+ }
+ return nil
+}
+
+func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
+ if len(matchers) <= 1 {
+ return nil
+ }
+
+ var (
+ c []match.Matcher
+ l int
+ )
+ for _, matcher := range matchers {
+ if ml := matcher.Len(); ml == -1 {
+ return nil
+ } else {
+ c = append(c, matcher)
+ l += ml
+ }
+ }
+ return match.NewRow(l, c...)
+}
+
+func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
+ if len(matchers) <= 1 {
+ return nil
+ }
+
+ var (
+ hasAny bool
+ hasSuper bool
+ hasSingle bool
+ min int
+ separator []rune
+ )
+
+ for i, matcher := range matchers {
+ var sep []rune
+
+ switch m := matcher.(type) {
+ case match.Super:
+ sep = []rune{}
+ hasSuper = true
+
+ case match.Any:
+ sep = m.Separators
+ hasAny = true
+
+ case match.Single:
+ sep = m.Separators
+ hasSingle = true
+ min++
+
+ case match.List:
+ if !m.Not {
+ return nil
+ }
+ sep = m.List
+ hasSingle = true
+ min++
+
+ default:
+ return nil
+ }
+
+ // initialize
+ if i == 0 {
+ separator = sep
+ }
+
+ if runes.Equal(sep, separator) {
+ continue
+ }
+
+ return nil
+ }
+
+ if hasSuper && !hasAny && !hasSingle {
+ return match.NewSuper()
+ }
+
+ if hasAny && !hasSuper && !hasSingle {
+ return match.NewAny(separator)
+ }
+
+ if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
+ return match.NewMin(min)
+ }
+
+ every := match.NewEveryOf()
+
+ if min > 0 {
+ every.Add(match.NewMin(min))
+
+ if !hasAny && !hasSuper {
+ every.Add(match.NewMax(min))
+ }
+ }
+
+ if len(separator) > 0 {
+ every.Add(match.NewContains(string(separator), true))
+ }
+
+ return every
+}
+
+func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
+ var done match.Matcher
+ var left, right, count int
+
+ for l := 0; l < len(matchers); l++ {
+ for r := len(matchers); r > l; r-- {
+ if glued := glueMatchers(matchers[l:r]); glued != nil {
+ var swap bool
+
+ if done == nil {
+ swap = true
+ } else {
+ cl, gl := done.Len(), glued.Len()
+ swap = cl > -1 && gl > -1 && gl > cl
+ swap = swap || count < r-l
+ }
+
+ if swap {
+ done = glued
+ left = l
+ right = r
+ count = r - l
+ }
+ }
+ }
+ }
+
+ if done == nil {
+ return matchers
+ }
+
+ next := append(append([]match.Matcher{}, matchers[:left]...), done)
+ if right < len(matchers) {
+ next = append(next, matchers[right:]...)
+ }
+
+ if len(next) == len(matchers) {
+ return next
+ }
+
+ return minimizeMatchers(next)
+}
+
+// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree
+func minimizeTree(tree *ast.Node) *ast.Node {
+ switch tree.Kind {
+ case ast.KindAnyOf:
+ return minimizeTreeAnyOf(tree)
+ default:
+ return nil
+ }
+}
+
+// minimizeAnyOf tries to find common children of given node of AnyOf pattern
+// it searches for common children from left and from right
+// if any common children are found – then it returns new optimized ast tree
+// else it returns nil
+func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
+ if !areOfSameKind(tree.Children, ast.KindPattern) {
+ return nil
+ }
+
+ commonLeft, commonRight := commonChildren(tree.Children)
+ commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
+ if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
+ return nil
+ }
+
+ var result []*ast.Node
+ if commonLeftCount > 0 {
+ result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
+ }
+
+ var anyOf []*ast.Node
+ for _, child := range tree.Children {
+ reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
+ var node *ast.Node
+ if len(reuse) == 0 {
+ // this pattern is completely reduced by commonLeft and commonRight patterns
+ // so it become nothing
+ node = ast.NewNode(ast.KindNothing, nil)
+ } else {
+ node = ast.NewNode(ast.KindPattern, nil, reuse...)
+ }
+ anyOf = appendIfUnique(anyOf, node)
+ }
+ switch {
+ case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
+ result = append(result, anyOf[0])
+ case len(anyOf) > 1:
+ result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
+ }
+
+ if commonRightCount > 0 {
+ result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
+ }
+
+ return ast.NewNode(ast.KindPattern, nil, result...)
+}
+
+func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
+ if len(nodes) <= 1 {
+ return
+ }
+
+ // find node that has least number of children
+ idx := leastChildren(nodes)
+ if idx == -1 {
+ return
+ }
+ tree := nodes[idx]
+ treeLength := len(tree.Children)
+
+ // allocate max able size for rightCommon slice
+ // to get ability insert elements in reverse order (from end to start)
+ // without sorting
+ commonRight = make([]*ast.Node, treeLength)
+ lastRight := treeLength // will use this to get results as commonRight[lastRight:]
+
+ var (
+ breakLeft bool
+ breakRight bool
+ commonTotal int
+ )
+ for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
+ treeLeft := tree.Children[i]
+ treeRight := tree.Children[j]
+
+ for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
+ // skip least children node
+ if k == idx {
+ continue
+ }
+
+ restLeft := nodes[k].Children[i]
+ restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]
+
+ breakLeft = breakLeft || !treeLeft.Equal(restLeft)
+
+ // disable searching for right common parts, if left part is already overlapping
+ breakRight = breakRight || (!breakLeft && j <= i)
+ breakRight = breakRight || !treeRight.Equal(restRight)
+ }
+
+ if !breakLeft {
+ commonTotal++
+ commonLeft = append(commonLeft, treeLeft)
+ }
+ if !breakRight {
+ commonTotal++
+ lastRight = j
+ commonRight[j] = treeRight
+ }
+ }
+
+ commonRight = commonRight[lastRight:]
+
+ return
+}
+
+func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
+ for _, n := range target {
+ if reflect.DeepEqual(n, val) {
+ return target
+ }
+ }
+ return append(target, val)
+}
+
+func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
+ for _, n := range nodes {
+ if n.Kind != kind {
+ return false
+ }
+ }
+ return true
+}
+
+func leastChildren(nodes []*ast.Node) int {
+ min := -1
+ idx := -1
+ for i, n := range nodes {
+ if idx == -1 || (len(n.Children) < min) {
+ min = len(n.Children)
+ idx = i
+ }
+ }
+ return idx
+}
+
+func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
+ var matchers []match.Matcher
+ for _, desc := range tree.Children {
+ m, err := compile(desc, sep)
+ if err != nil {
+ return nil, err
+ }
+ matchers = append(matchers, optimizeMatcher(m))
+ }
+ return matchers, nil
+}
+
+func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
+ switch tree.Kind {
+ case ast.KindAnyOf:
+ // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
+ if n := minimizeTree(tree); n != nil {
+ return compile(n, sep)
+ }
+ matchers, err := compileTreeChildren(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+ return match.NewAnyOf(matchers...), nil
+
+ case ast.KindPattern:
+ if len(tree.Children) == 0 {
+ return match.NewNothing(), nil
+ }
+ matchers, err := compileTreeChildren(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+ m, err = compileMatchers(minimizeMatchers(matchers))
+ if err != nil {
+ return nil, err
+ }
+
+ case ast.KindAny:
+ m = match.NewAny(sep)
+
+ case ast.KindSuper:
+ m = match.NewSuper()
+
+ case ast.KindSingle:
+ m = match.NewSingle(sep)
+
+ case ast.KindNothing:
+ m = match.NewNothing()
+
+ case ast.KindList:
+ l := tree.Value.(ast.List)
+ m = match.NewList([]rune(l.Chars), l.Not)
+
+ case ast.KindRange:
+ r := tree.Value.(ast.Range)
+ m = match.NewRange(r.Lo, r.Hi, r.Not)
+
+ case ast.KindText:
+ t := tree.Value.(ast.Text)
+ m = match.NewText(t.Text)
+
+ default:
+ return nil, fmt.Errorf("could not compile tree: unknown node type")
+ }
+
+ return optimizeMatcher(m), nil
+}
+
+func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
+ m, err := compile(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go
new file mode 100644
index 000000000..2afde343a
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/glob.go
@@ -0,0 +1,80 @@
+package glob
+
+import (
+ "github.com/gobwas/glob/compiler"
+ "github.com/gobwas/glob/syntax"
+)
+
+// Glob represents compiled glob pattern.
+type Glob interface {
+ Match(string) bool
+}
+
+// Compile creates Glob for given pattern and strings (if any present after pattern) as separators.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+//
+// term:
+// `*` matches any sequence of non-separator characters
+// `**` matches any sequence of characters
+// `?` matches any single non-separator character
+// `[` [ `!` ] { character-range } `]`
+// character class (must be non-empty)
+// `{` pattern-list `}`
+// pattern alternatives
+// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
+// `\` c matches character c
+//
+// character-range:
+// c matches character c (c != `\\`, `-`, `]`)
+// `\` c matches character c
+// lo `-` hi matches character c for lo <= c <= hi
+//
+// pattern-list:
+// pattern { `,` pattern }
+// comma-separated (without spaces) patterns
+//
+func Compile(pattern string, separators ...rune) (Glob, error) {
+ ast, err := syntax.Parse(pattern)
+ if err != nil {
+ return nil, err
+ }
+
+ matcher, err := compiler.Compile(ast, separators)
+ if err != nil {
+ return nil, err
+ }
+
+ return matcher, nil
+}
+
+// MustCompile is the same as Compile, except that if Compile returns error, this will panic
+func MustCompile(pattern string, separators ...rune) Glob {
+ g, err := Compile(pattern, separators...)
+ if err != nil {
+ panic(err)
+ }
+
+ return g
+}
+
+// QuoteMeta returns a string that quotes all glob pattern meta characters
+// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
+func QuoteMeta(s string) string {
+ b := make([]byte, 2*len(s))
+
+ // a byte loop is correct because all meta characters are ASCII
+ j := 0
+ for i := 0; i < len(s); i++ {
+ if syntax.Special(s[i]) {
+ b[j] = '\\'
+ j++
+ }
+ b[j] = s[i]
+ j++
+ }
+
+ return string(b[0:j])
+}
diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go
new file mode 100644
index 000000000..514a9a5c4
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/strings"
+)
+
+type Any struct {
+ Separators []rune
+}
+
+func NewAny(s []rune) Any {
+ return Any{s}
+}
+
+func (self Any) Match(s string) bool {
+ return strings.IndexAnyRunes(s, self.Separators) == -1
+}
+
+func (self Any) Index(s string) (int, []int) {
+ found := strings.IndexAnyRunes(s, self.Separators)
+ switch found {
+ case -1:
+ case 0:
+ return 0, segments0
+ default:
+ s = s[:found]
+ }
+
+ segments := acquireSegments(len(s))
+ for i := range s {
+ segments = append(segments, i)
+ }
+ segments = append(segments, len(s))
+
+ return 0, segments
+}
+
+func (self Any) Len() int {
+ return lenNo
+}
+
+func (self Any) String() string {
+ return fmt.Sprintf("<any:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go
new file mode 100644
index 000000000..8e65356cd
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any_of.go
@@ -0,0 +1,82 @@
+package match
+
+import "fmt"
+
+type AnyOf struct {
+ Matchers Matchers
+}
+
+func NewAnyOf(m ...Matcher) AnyOf {
+ return AnyOf{Matchers(m)}
+}
+
+func (self *AnyOf) Add(m Matcher) error {
+ self.Matchers = append(self.Matchers, m)
+ return nil
+}
+
+func (self AnyOf) Match(s string) bool {
+ for _, m := range self.Matchers {
+ if m.Match(s) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (self AnyOf) Index(s string) (int, []int) {
+ index := -1
+
+ segments := acquireSegments(len(s))
+ for _, m := range self.Matchers {
+ idx, seg := m.Index(s)
+ if idx == -1 {
+ continue
+ }
+
+ if index == -1 || idx < index {
+ index = idx
+ segments = append(segments[:0], seg...)
+ continue
+ }
+
+ if idx > index {
+ continue
+ }
+
+ // here idx == index
+ segments = appendMerge(segments, seg)
+ }
+
+ if index == -1 {
+ releaseSegments(segments)
+ return -1, nil
+ }
+
+ return index, segments
+}
+
+func (self AnyOf) Len() (l int) {
+ l = -1
+ for _, m := range self.Matchers {
+ ml := m.Len()
+ switch {
+ case l == -1:
+ l = ml
+ continue
+
+ case ml == -1:
+ return -1
+
+ case l != ml:
+ return -1
+ }
+ }
+
+ return
+}
+
+func (self AnyOf) String() string {
+ return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go
new file mode 100644
index 000000000..a8130e93e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/btree.go
@@ -0,0 +1,146 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type BTree struct {
+ Value Matcher
+ Left Matcher
+ Right Matcher
+ ValueLengthRunes int
+ LeftLengthRunes int
+ RightLengthRunes int
+ LengthRunes int
+}
+
+func NewBTree(Value, Left, Right Matcher) (tree BTree) {
+ tree.Value = Value
+ tree.Left = Left
+ tree.Right = Right
+
+ lenOk := true
+ if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
+ lenOk = false
+ }
+
+ if Left != nil {
+ if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
+ lenOk = false
+ }
+ }
+
+ if Right != nil {
+ if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
+ lenOk = false
+ }
+ }
+
+ if lenOk {
+ tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
+ } else {
+ tree.LengthRunes = -1
+ }
+
+ return tree
+}
+
+func (self BTree) Len() int {
+ return self.LengthRunes
+}
+
+// todo?
+func (self BTree) Index(s string) (int, []int) {
+ return -1, nil
+}
+
+func (self BTree) Match(s string) bool {
+ inputLen := len(s)
+
+ // self.Length, self.RLen and self.LLen are values meaning the length of runes for each part
+ // here we manipulating byte length for better optimizations
+ // but these checks still works, cause minLen of 1-rune string is 1 byte.
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen {
+ return false
+ }
+
+ // try to cut unnecessary parts
+ // by knowledge of length of right and left part
+ var offset, limit int
+ if self.LeftLengthRunes >= 0 {
+ offset = self.LeftLengthRunes
+ }
+ if self.RightLengthRunes >= 0 {
+ limit = inputLen - self.RightLengthRunes
+ } else {
+ limit = inputLen
+ }
+
+ for offset < limit {
+ // search for matching part in substring
+ index, segments := self.Value.Index(s[offset:limit])
+ if index == -1 {
+ releaseSegments(segments)
+ return false
+ }
+
+ l := s[:offset+index]
+ var left bool
+ if self.Left != nil {
+ left = self.Left.Match(l)
+ } else {
+ left = l == ""
+ }
+
+ if left {
+ for i := len(segments) - 1; i >= 0; i-- {
+ length := segments[i]
+
+ var right bool
+ var r string
+ // if there is no string for the right branch
+ if inputLen <= offset+index+length {
+ r = ""
+ } else {
+ r = s[offset+index+length:]
+ }
+
+ if self.Right != nil {
+ right = self.Right.Match(r)
+ } else {
+ right = r == ""
+ }
+
+ if right {
+ releaseSegments(segments)
+ return true
+ }
+ }
+ }
+
+ _, step := utf8.DecodeRuneInString(s[offset+index:])
+ offset += index + step
+
+ releaseSegments(segments)
+ }
+
+ return false
+}
+
+func (self BTree) String() string {
+ const n string = "<nil>"
+ var l, r string
+ if self.Left == nil {
+ l = n
+ } else {
+ l = self.Left.String()
+ }
+ if self.Right == nil {
+ r = n
+ } else {
+ r = self.Right.String()
+ }
+
+ return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
+}
diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go
new file mode 100644
index 000000000..0998e95b0
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/contains.go
@@ -0,0 +1,58 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Contains struct {
+ Needle string
+ Not bool
+}
+
+func NewContains(needle string, not bool) Contains {
+ return Contains{needle, not}
+}
+
+func (self Contains) Match(s string) bool {
+ return strings.Contains(s, self.Needle) != self.Not
+}
+
+func (self Contains) Index(s string) (int, []int) {
+ var offset int
+
+ idx := strings.Index(s, self.Needle)
+
+ if !self.Not {
+ if idx == -1 {
+ return -1, nil
+ }
+
+ offset = idx + len(self.Needle)
+ if len(s) <= offset {
+ return 0, []int{offset}
+ }
+ s = s[offset:]
+ } else if idx != -1 {
+ s = s[:idx]
+ }
+
+ segments := acquireSegments(len(s) + 1)
+ for i := range s {
+ segments = append(segments, offset+i)
+ }
+
+ return 0, append(segments, offset+len(s))
+}
+
+func (self Contains) Len() int {
+ return lenNo
+}
+
+func (self Contains) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+ return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
+}
diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go
new file mode 100644
index 000000000..7c968ee36
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/every_of.go
@@ -0,0 +1,99 @@
+package match
+
+import (
+ "fmt"
+)
+
+type EveryOf struct {
+ Matchers Matchers
+}
+
+func NewEveryOf(m ...Matcher) EveryOf {
+ return EveryOf{Matchers(m)}
+}
+
+func (self *EveryOf) Add(m Matcher) error {
+ self.Matchers = append(self.Matchers, m)
+ return nil
+}
+
+func (self EveryOf) Len() (l int) {
+ for _, m := range self.Matchers {
+ if ml := m.Len(); l > 0 {
+ l += ml
+ } else {
+ return -1
+ }
+ }
+
+ return
+}
+
+func (self EveryOf) Index(s string) (int, []int) {
+ var index int
+ var offset int
+
+ // make `in` with cap as len(s),
+ // cause it is the maximum size of output segments values
+ next := acquireSegments(len(s))
+ current := acquireSegments(len(s))
+
+ sub := s
+ for i, m := range self.Matchers {
+ idx, seg := m.Index(sub)
+ if idx == -1 {
+ releaseSegments(next)
+ releaseSegments(current)
+ return -1, nil
+ }
+
+ if i == 0 {
+ // we use copy here instead of `current = seg`
+ // cause seg is a slice from reusable buffer `in`
+ // and it could be overwritten in next iteration
+ current = append(current, seg...)
+ } else {
+ // clear the next
+ next = next[:0]
+
+ delta := index - (idx + offset)
+ for _, ex := range current {
+ for _, n := range seg {
+ if ex+delta == n {
+ next = append(next, n)
+ }
+ }
+ }
+
+ if len(next) == 0 {
+ releaseSegments(next)
+ releaseSegments(current)
+ return -1, nil
+ }
+
+ current = append(current[:0], next...)
+ }
+
+ index = idx + offset
+ sub = s[index:]
+ offset += idx
+ }
+
+ releaseSegments(next)
+
+ return index, current
+}
+
+func (self EveryOf) Match(s string) bool {
+ for _, m := range self.Matchers {
+ if !m.Match(s) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self EveryOf) String() string {
+	return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go
new file mode 100644
index 000000000..7fd763ecd
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/list.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+type List struct {
+ List []rune
+ Not bool
+}
+
+func NewList(list []rune, not bool) List {
+ return List{list, not}
+}
+
+func (self List) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ inList := runes.IndexRune(self.List, r) != -1
+ return inList == !self.Not
+}
+
+func (self List) Len() int {
+ return lenOne
+}
+
+func (self List) Index(s string) (int, []int) {
+ for i, r := range s {
+ if self.Not == (runes.IndexRune(self.List, r) == -1) {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self List) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+
+	return fmt.Sprintf("<list:[%s%s]>", not, string(self.List))
+}
diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go
new file mode 100644
index 000000000..f80e007fb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/match.go
@@ -0,0 +1,81 @@
+package match
+
+// todo common table of rune's length
+
+import (
+ "fmt"
+ "strings"
+)
+
+const lenOne = 1
+const lenZero = 0
+const lenNo = -1
+
+type Matcher interface {
+ Match(string) bool
+ Index(string) (int, []int)
+ Len() int
+ String() string
+}
+
+type Matchers []Matcher
+
+func (m Matchers) String() string {
+ var s []string
+ for _, matcher := range m {
+ s = append(s, fmt.Sprint(matcher))
+ }
+
+ return fmt.Sprintf("%s", strings.Join(s, ","))
+}
+
+// appendMerge merges and sorts given already SORTED and UNIQUE segments.
+func appendMerge(target, sub []int) []int {
+ lt, ls := len(target), len(sub)
+ out := make([]int, 0, lt+ls)
+
+ for x, y := 0, 0; x < lt || y < ls; {
+ if x >= lt {
+ out = append(out, sub[y:]...)
+ break
+ }
+
+ if y >= ls {
+ out = append(out, target[x:]...)
+ break
+ }
+
+ xValue := target[x]
+ yValue := sub[y]
+
+ switch {
+
+ case xValue == yValue:
+ out = append(out, xValue)
+ x++
+ y++
+
+ case xValue < yValue:
+ out = append(out, xValue)
+ x++
+
+ case yValue < xValue:
+ out = append(out, yValue)
+ y++
+
+ }
+ }
+
+ target = append(target[:0], out...)
+
+ return target
+}
+
+func reverseSegments(input []int) {
+ l := len(input)
+ m := l / 2
+
+ for i := 0; i < m; i++ {
+ input[i], input[l-i-1] = input[l-i-1], input[i]
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go
new file mode 100644
index 000000000..d72f69eff
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/max.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type Max struct {
+ Limit int
+}
+
+func NewMax(l int) Max {
+ return Max{l}
+}
+
+func (self Max) Match(s string) bool {
+ var l int
+ for range s {
+ l += 1
+ if l > self.Limit {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self Max) Index(s string) (int, []int) {
+ segments := acquireSegments(self.Limit + 1)
+ segments = append(segments, 0)
+ var count int
+ for i, r := range s {
+ count++
+ if count > self.Limit {
+ break
+ }
+ segments = append(segments, i+utf8.RuneLen(r))
+ }
+
+ return 0, segments
+}
+
+func (self Max) Len() int {
+ return lenNo
+}
+
+func (self Max) String() string {
+	return fmt.Sprintf("<max:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go
new file mode 100644
index 000000000..db57ac8eb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/min.go
@@ -0,0 +1,57 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type Min struct {
+ Limit int
+}
+
+func NewMin(l int) Min {
+ return Min{l}
+}
+
+func (self Min) Match(s string) bool {
+ var l int
+ for range s {
+ l += 1
+ if l >= self.Limit {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (self Min) Index(s string) (int, []int) {
+ var count int
+
+ c := len(s) - self.Limit + 1
+ if c <= 0 {
+ return -1, nil
+ }
+
+ segments := acquireSegments(c)
+ for i, r := range s {
+ count++
+ if count >= self.Limit {
+ segments = append(segments, i+utf8.RuneLen(r))
+ }
+ }
+
+ if len(segments) == 0 {
+ return -1, nil
+ }
+
+ return 0, segments
+}
+
+func (self Min) Len() int {
+ return lenNo
+}
+
+func (self Min) String() string {
+	return fmt.Sprintf("<min:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go
new file mode 100644
index 000000000..0d4ecd36b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/nothing.go
@@ -0,0 +1,27 @@
+package match
+
+import (
+ "fmt"
+)
+
+type Nothing struct{}
+
+func NewNothing() Nothing {
+ return Nothing{}
+}
+
+func (self Nothing) Match(s string) bool {
+ return len(s) == 0
+}
+
+func (self Nothing) Index(s string) (int, []int) {
+ return 0, segments0
+}
+
+func (self Nothing) Len() int {
+ return lenZero
+}
+
+func (self Nothing) String() string {
+	return fmt.Sprintf("<nothing>")
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go
new file mode 100644
index 000000000..a7347250e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix.go
@@ -0,0 +1,50 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type Prefix struct {
+ Prefix string
+}
+
+func NewPrefix(p string) Prefix {
+ return Prefix{p}
+}
+
+func (self Prefix) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Prefix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ length := len(self.Prefix)
+ var sub string
+ if len(s) > idx+length {
+ sub = s[idx+length:]
+ } else {
+ sub = ""
+ }
+
+ segments := acquireSegments(len(sub) + 1)
+ segments = append(segments, length)
+ for i, r := range sub {
+ segments = append(segments, length+i+utf8.RuneLen(r))
+ }
+
+ return idx, segments
+}
+
+func (self Prefix) Len() int {
+ return lenNo
+}
+
+func (self Prefix) Match(s string) bool {
+ return strings.HasPrefix(s, self.Prefix)
+}
+
+func (self Prefix) String() string {
+	return fmt.Sprintf("<prefix:%s>", self.Prefix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go
new file mode 100644
index 000000000..8ee58fe1b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_any.go
@@ -0,0 +1,55 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+type PrefixAny struct {
+ Prefix string
+ Separators []rune
+}
+
+func NewPrefixAny(s string, sep []rune) PrefixAny {
+ return PrefixAny{s, sep}
+}
+
+func (self PrefixAny) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Prefix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ n := len(self.Prefix)
+ sub := s[idx+n:]
+ i := sutil.IndexAnyRunes(sub, self.Separators)
+ if i > -1 {
+ sub = sub[:i]
+ }
+
+ seg := acquireSegments(len(sub) + 1)
+ seg = append(seg, n)
+ for i, r := range sub {
+ seg = append(seg, n+i+utf8.RuneLen(r))
+ }
+
+ return idx, seg
+}
+
+func (self PrefixAny) Len() int {
+ return lenNo
+}
+
+func (self PrefixAny) Match(s string) bool {
+ if !strings.HasPrefix(s, self.Prefix) {
+ return false
+ }
+ return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
+}
+
+func (self PrefixAny) String() string {
+	return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
new file mode 100644
index 000000000..8208085a1
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
@@ -0,0 +1,62 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PrefixSuffix struct {
+ Prefix, Suffix string
+}
+
+func NewPrefixSuffix(p, s string) PrefixSuffix {
+ return PrefixSuffix{p, s}
+}
+
+func (self PrefixSuffix) Index(s string) (int, []int) {
+ prefixIdx := strings.Index(s, self.Prefix)
+ if prefixIdx == -1 {
+ return -1, nil
+ }
+
+ suffixLen := len(self.Suffix)
+ if suffixLen <= 0 {
+ return prefixIdx, []int{len(s) - prefixIdx}
+ }
+
+ if (len(s) - prefixIdx) <= 0 {
+ return -1, nil
+ }
+
+ segments := acquireSegments(len(s) - prefixIdx)
+ for sub := s[prefixIdx:]; ; {
+ suffixIdx := strings.LastIndex(sub, self.Suffix)
+ if suffixIdx == -1 {
+ break
+ }
+
+ segments = append(segments, suffixIdx+suffixLen)
+ sub = sub[:suffixIdx]
+ }
+
+ if len(segments) == 0 {
+ releaseSegments(segments)
+ return -1, nil
+ }
+
+ reverseSegments(segments)
+
+ return prefixIdx, segments
+}
+
+func (self PrefixSuffix) Len() int {
+ return lenNo
+}
+
+func (self PrefixSuffix) Match(s string) bool {
+ return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
+}
+
+func (self PrefixSuffix) String() string {
+	return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go
new file mode 100644
index 000000000..ce30245a4
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/range.go
@@ -0,0 +1,48 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type Range struct {
+ Lo, Hi rune
+ Not bool
+}
+
+func NewRange(lo, hi rune, not bool) Range {
+ return Range{lo, hi, not}
+}
+
+func (self Range) Len() int {
+ return lenOne
+}
+
+func (self Range) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ inRange := r >= self.Lo && r <= self.Hi
+
+ return inRange == !self.Not
+}
+
+func (self Range) Index(s string) (int, []int) {
+ for i, r := range s {
+ if self.Not != (r >= self.Lo && r <= self.Hi) {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self Range) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+	return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi))
+}
diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go
new file mode 100644
index 000000000..4379042e4
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/row.go
@@ -0,0 +1,77 @@
+package match
+
+import (
+ "fmt"
+)
+
+type Row struct {
+ Matchers Matchers
+ RunesLength int
+ Segments []int
+}
+
+func NewRow(len int, m ...Matcher) Row {
+ return Row{
+ Matchers: Matchers(m),
+ RunesLength: len,
+ Segments: []int{len},
+ }
+}
+
+func (self Row) matchAll(s string) bool {
+ var idx int
+ for _, m := range self.Matchers {
+ length := m.Len()
+
+ var next, i int
+ for next = range s[idx:] {
+ i++
+ if i == length {
+ break
+ }
+ }
+
+ if i < length || !m.Match(s[idx:idx+next+1]) {
+ return false
+ }
+
+ idx += next + 1
+ }
+
+ return true
+}
+
+func (self Row) lenOk(s string) bool {
+ var i int
+ for range s {
+ i++
+ if i > self.RunesLength {
+ return false
+ }
+ }
+ return self.RunesLength == i
+}
+
+func (self Row) Match(s string) bool {
+ return self.lenOk(s) && self.matchAll(s)
+}
+
+func (self Row) Len() (l int) {
+ return self.RunesLength
+}
+
+func (self Row) Index(s string) (int, []int) {
+ for i := range s {
+ if len(s[i:]) < self.RunesLength {
+ break
+ }
+ if self.matchAll(s[i:]) {
+ return i, self.Segments
+ }
+ }
+ return -1, nil
+}
+
+func (self Row) String() string {
+	return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go
new file mode 100644
index 000000000..9ea6f3094
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/segments.go
@@ -0,0 +1,91 @@
+package match
+
+import (
+ "sync"
+)
+
+type SomePool interface {
+ Get() []int
+ Put([]int)
+}
+
+var segmentsPools [1024]sync.Pool
+
+func toPowerOfTwo(v int) int {
+ v--
+ v |= v >> 1
+ v |= v >> 2
+ v |= v >> 4
+ v |= v >> 8
+ v |= v >> 16
+ v++
+
+ return v
+}
+
+const (
+ cacheFrom = 16
+ cacheToAndHigher = 1024
+ cacheFromIndex = 15
+ cacheToAndHigherIndex = 1023
+)
+
+var (
+ segments0 = []int{0}
+ segments1 = []int{1}
+ segments2 = []int{2}
+ segments3 = []int{3}
+ segments4 = []int{4}
+)
+
+var segmentsByRuneLength [5][]int = [5][]int{
+ 0: segments0,
+ 1: segments1,
+ 2: segments2,
+ 3: segments3,
+ 4: segments4,
+}
+
+func init() {
+ for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
+ func(i int) {
+ segmentsPools[i-1] = sync.Pool{New: func() interface{} {
+ return make([]int, 0, i)
+ }}
+ }(i)
+ }
+}
+
+func getTableIndex(c int) int {
+ p := toPowerOfTwo(c)
+ switch {
+ case p >= cacheToAndHigher:
+ return cacheToAndHigherIndex
+ case p <= cacheFrom:
+ return cacheFromIndex
+ default:
+ return p - 1
+ }
+}
+
+func acquireSegments(c int) []int {
+ // make []int with less capacity than cacheFrom
+ // is faster than acquiring it from pool
+ if c < cacheFrom {
+ return make([]int, 0, c)
+ }
+
+ return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
+}
+
+func releaseSegments(s []int) {
+ c := cap(s)
+
+ // make []int with less capacity than cacheFrom
+ // is faster than acquiring it from pool
+ if c < cacheFrom {
+ return
+ }
+
+ segmentsPools[getTableIndex(c)].Put(s)
+}
diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go
new file mode 100644
index 000000000..ee6e3954c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/single.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+// single represents ?
+type Single struct {
+ Separators []rune
+}
+
+func NewSingle(s []rune) Single {
+ return Single{s}
+}
+
+func (self Single) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ return runes.IndexRune(self.Separators, r) == -1
+}
+
+func (self Single) Len() int {
+ return lenOne
+}
+
+func (self Single) Index(s string) (int, []int) {
+ for i, r := range s {
+ if runes.IndexRune(self.Separators, r) == -1 {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self Single) String() string {
+	return fmt.Sprintf("<single:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go
new file mode 100644
index 000000000..85bea8c68
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix.go
@@ -0,0 +1,35 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Suffix struct {
+ Suffix string
+}
+
+func NewSuffix(s string) Suffix {
+ return Suffix{s}
+}
+
+func (self Suffix) Len() int {
+ return lenNo
+}
+
+func (self Suffix) Match(s string) bool {
+ return strings.HasSuffix(s, self.Suffix)
+}
+
+func (self Suffix) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ return 0, []int{idx + len(self.Suffix)}
+}
+
+func (self Suffix) String() string {
+	return fmt.Sprintf("<suffix:%s>", self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go
new file mode 100644
index 000000000..c5106f819
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix_any.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+type SuffixAny struct {
+ Suffix string
+ Separators []rune
+}
+
+func NewSuffixAny(s string, sep []rune) SuffixAny {
+ return SuffixAny{s, sep}
+}
+
+func (self SuffixAny) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
+
+ return i, []int{idx + len(self.Suffix) - i}
+}
+
+func (self SuffixAny) Len() int {
+ return lenNo
+}
+
+func (self SuffixAny) Match(s string) bool {
+ if !strings.HasSuffix(s, self.Suffix) {
+ return false
+ }
+ return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
+}
+
+func (self SuffixAny) String() string {
+	return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go
new file mode 100644
index 000000000..3875950bb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/super.go
@@ -0,0 +1,33 @@
+package match
+
+import (
+ "fmt"
+)
+
+type Super struct{}
+
+func NewSuper() Super {
+ return Super{}
+}
+
+func (self Super) Match(s string) bool {
+ return true
+}
+
+func (self Super) Len() int {
+ return lenNo
+}
+
+func (self Super) Index(s string) (int, []int) {
+ segments := acquireSegments(len(s) + 1)
+ for i := range s {
+ segments = append(segments, i)
+ }
+ segments = append(segments, len(s))
+
+ return 0, segments
+}
+
+func (self Super) String() string {
+	return fmt.Sprintf("<super>")
+}
diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go
new file mode 100644
index 000000000..0a17616d3
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/text.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// raw represents raw string to match
+type Text struct {
+ Str string
+ RunesLength int
+ BytesLength int
+ Segments []int
+}
+
+func NewText(s string) Text {
+ return Text{
+ Str: s,
+ RunesLength: utf8.RuneCountInString(s),
+ BytesLength: len(s),
+ Segments: []int{len(s)},
+ }
+}
+
+func (self Text) Match(s string) bool {
+ return self.Str == s
+}
+
+func (self Text) Len() int {
+ return self.RunesLength
+}
+
+func (self Text) Index(s string) (int, []int) {
+ index := strings.Index(s, self.Str)
+ if index == -1 {
+ return -1, nil
+ }
+
+ return index, self.Segments
+}
+
+func (self Text) String() string {
+	return fmt.Sprintf("<text:`%s`>", self.Str)
+}
diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md
new file mode 100644
index 000000000..f58144e73
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/readme.md
@@ -0,0 +1,148 @@
+# glob.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
+
+> Go Globbing Library.
+
+## Install
+
+```shell
+ go get github.com/gobwas/glob
+```
+
+## Example
+
+```go
+
+package main
+
+import "github.com/gobwas/glob"
+
+func main() {
+ var g glob.Glob
+
+ // create simple glob
+ g = glob.MustCompile("*.github.com")
+ g.Match("api.github.com") // true
+
+ // quote meta characters and then create simple glob
+ g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
+ g.Match("*.github.com") // true
+
+ // create new glob with set of delimiters as ["."]
+ g = glob.MustCompile("api.*.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // false
+
+ // create new glob with set of delimiters as ["."]
+ // but now with super wildcard
+ g = glob.MustCompile("api.**.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // true
+
+ // create glob with single symbol wildcard
+ g = glob.MustCompile("?at")
+ g.Match("cat") // true
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with single symbol wildcard and delimiters ['f']
+ g = glob.MustCompile("?at", 'f')
+ g.Match("cat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[abc]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[!abc]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[a-c]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[!a-c]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with pattern-alternatives list
+ g = glob.MustCompile("{cat,bat,[fr]at}")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // true
+ g.Match("rat") // true
+ g.Match("at") // false
+ g.Match("zat") // false
+}
+
+```
+
+## Performance
+
+This library is created for compile-once patterns. This means, that compilation could take time, but
+strings matching is done faster, than in case when always parsing template.
+
+If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower.
+
+Run `go test -bench=.` from source root to see the benchmarks:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
+`https://*.google.*` | `https://account.google.com` | `true` | 96
+`https://*.google.*` | `https://google.com` | `false` | 66
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
+`abc*` | `abcdef` | `true` | 8.15
+`abc*` | `af` | `false` | 5.68
+`*def` | `abcdef` | `true` | 8.84
+`*def` | `af` | `false` | 5.74
+`ab*ef` | `abcdef` | `true` | 15.2
+`ab*ef` | `af` | `false` | 10.4
+
+The same things with `regexp` package:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
+`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
+`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
+`^abc.*$` | `abcdef` | `true` | 237
+`^abc.*$` | `af` | `false` | 100
+`^.*def$` | `abcdef` | `true` | 464
+`^.*def$` | `af` | `false` | 265
+`^ab.*ef$` | `abcdef` | `true` | 375
+`^ab.*ef$` | `af` | `false` | 145
+
+[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/glob
+[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/glob
+
+## Syntax
+
+Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
+except that `**` is aka super-asterisk, that do not sensitive for separators.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
new file mode 100644
index 000000000..3220a694a
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
@@ -0,0 +1,122 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type Node struct {
+ Parent *Node
+ Children []*Node
+ Value interface{}
+ Kind Kind
+}
+
+func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
+ n := &Node{
+ Kind: k,
+ Value: v,
+ }
+ for _, c := range ch {
+ Insert(n, c)
+ }
+ return n
+}
+
+func (a *Node) Equal(b *Node) bool {
+ if a.Kind != b.Kind {
+ return false
+ }
+ if a.Value != b.Value {
+ return false
+ }
+ if len(a.Children) != len(b.Children) {
+ return false
+ }
+ for i, c := range a.Children {
+ if !c.Equal(b.Children[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Node) String() string {
+ var buf bytes.Buffer
+ buf.WriteString(a.Kind.String())
+ if a.Value != nil {
+ buf.WriteString(" =")
+ buf.WriteString(fmt.Sprintf("%v", a.Value))
+ }
+ if len(a.Children) > 0 {
+ buf.WriteString(" [")
+ for i, c := range a.Children {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(c.String())
+ }
+ buf.WriteString("]")
+ }
+ return buf.String()
+}
+
+func Insert(parent *Node, children ...*Node) {
+ parent.Children = append(parent.Children, children...)
+ for _, ch := range children {
+ ch.Parent = parent
+ }
+}
+
+type List struct {
+ Not bool
+ Chars string
+}
+
+type Range struct {
+ Not bool
+ Lo, Hi rune
+}
+
+type Text struct {
+ Text string
+}
+
+type Kind int
+
+const (
+ KindNothing Kind = iota
+ KindPattern
+ KindList
+ KindRange
+ KindText
+ KindAny
+ KindSuper
+ KindSingle
+ KindAnyOf
+)
+
+func (k Kind) String() string {
+ switch k {
+ case KindNothing:
+ return "Nothing"
+ case KindPattern:
+ return "Pattern"
+ case KindList:
+ return "List"
+ case KindRange:
+ return "Range"
+ case KindText:
+ return "Text"
+ case KindAny:
+ return "Any"
+ case KindSuper:
+ return "Super"
+ case KindSingle:
+ return "Single"
+ case KindAnyOf:
+ return "AnyOf"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
new file mode 100644
index 000000000..429b40943
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
@@ -0,0 +1,157 @@
+package ast
+
+import (
+ "errors"
+ "fmt"
+ "github.com/gobwas/glob/syntax/lexer"
+ "unicode/utf8"
+)
+
+type Lexer interface {
+ Next() lexer.Token
+}
+
+type parseFn func(*Node, Lexer) (parseFn, *Node, error)
+
+func Parse(lexer Lexer) (*Node, error) {
+ var parser parseFn
+
+ root := NewNode(KindPattern, nil)
+
+ var (
+ tree *Node
+ err error
+ )
+ for parser, tree = parserMain, root; parser != nil; {
+ parser, tree, err = parser(tree, lexer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return root, nil
+}
+
+func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, nil
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Text:
+ Insert(tree, NewNode(KindText, Text{token.Raw}))
+ return parserMain, tree, nil
+
+ case lexer.Any:
+ Insert(tree, NewNode(KindAny, nil))
+ return parserMain, tree, nil
+
+ case lexer.Super:
+ Insert(tree, NewNode(KindSuper, nil))
+ return parserMain, tree, nil
+
+ case lexer.Single:
+ Insert(tree, NewNode(KindSingle, nil))
+ return parserMain, tree, nil
+
+ case lexer.RangeOpen:
+ return parserRange, tree, nil
+
+ case lexer.TermsOpen:
+ a := NewNode(KindAnyOf, nil)
+ Insert(tree, a)
+
+ p := NewNode(KindPattern, nil)
+ Insert(a, p)
+
+ return parserMain, p, nil
+
+ case lexer.Separator:
+ p := NewNode(KindPattern, nil)
+ Insert(tree.Parent, p)
+
+ return parserMain, p, nil
+
+ case lexer.TermsClose:
+ return parserMain, tree.Parent.Parent, nil
+
+ default:
+ return nil, tree, fmt.Errorf("unexpected token: %s", token)
+ }
+ }
+ return nil, tree, fmt.Errorf("unknown error")
+}
+
+func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ var (
+ not bool
+ lo rune
+ hi rune
+ chars string
+ )
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, errors.New("unexpected end")
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Not:
+ not = true
+
+ case lexer.RangeLo:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+ lo = r
+
+ case lexer.RangeBetween:
+ //
+
+ case lexer.RangeHi:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+
+ hi = r
+
+ if hi < lo {
+ return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
+ }
+
+ case lexer.Text:
+ chars = token.Raw
+
+ case lexer.RangeClose:
+ isRange := lo != 0 && hi != 0
+ isChars := chars != ""
+
+ if isChars == isRange {
+ return nil, tree, fmt.Errorf("could not parse range")
+ }
+
+ if isRange {
+ Insert(tree, NewNode(KindRange, Range{
+ Lo: lo,
+ Hi: hi,
+ Not: not,
+ }))
+ } else {
+ Insert(tree, NewNode(KindList, List{
+ Chars: chars,
+ Not: not,
+ }))
+ }
+
+ return parserMain, tree, nil
+ }
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
new file mode 100644
index 000000000..a1c8d1962
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
@@ -0,0 +1,273 @@
+package lexer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
// Meta characters of the glob syntax. The lowercase underscore names are
// kept for consistency with the rest of this vendored package.
const (
	char_any           = '*'  // wildcard
	char_comma         = ','  // separates alternatives inside {...}
	char_single        = '?'  // matches exactly one character
	char_escape        = '\\' // escapes the following character
	char_range_open    = '['
	char_range_close   = ']'
	char_terms_open    = '{'
	char_terms_close   = '}'
	char_range_not     = '!' // negation inside [...]
	char_range_between = '-' // lo-hi separator inside [...]
)

// specials lists the bytes reported as special by Special. Note that
// ',', '!' and '-' are intentionally absent: they are only special in
// context ({...} or [...]).
var specials = []byte{
	char_any,
	char_single,
	char_escape,
	char_range_open,
	char_range_close,
	char_terms_open,
	char_terms_close,
}
+
// Special reports whether c is one of the glob meta characters listed in
// specials.
func Special(c byte) bool {
	return bytes.IndexByte(specials, c) != -1
}
+
// tokens is a small FIFO queue of lexed tokens.
type tokens []Token

// shift removes and returns the first token in the queue.
// It is O(n) in the queue length, which is fine here: the lexer never
// buffers more than a handful of tokens at once.
// Precondition: the queue is non-empty (indexing panics otherwise).
func (i *tokens) shift() (ret Token) {
	ret = (*i)[0]
	copy(*i, (*i)[1:])
	*i = (*i)[:len(*i)-1]
	return
}

// push appends a token to the end of the queue.
func (i *tokens) push(v Token) {
	*i = append(*i, v)
}

// empty reports whether the queue holds no tokens.
func (i *tokens) empty() bool {
	return len(*i) == 0
}
+
// eof is the sentinel rune returned by peek/read at end of input.
var eof rune = 0

// lexer is a streaming tokenizer over a glob pattern string.
type lexer struct {
	data string // full source pattern
	pos  int    // byte offset of the next rune to read
	err  error  // first error encountered; sticky (see Next)

	tokens     tokens // queued tokens not yet returned by Next
	termsLevel int    // current {...} nesting depth

	// one-rune pushback buffer used by read/unread
	lastRune     rune
	lastRuneSize int
	hasRune      bool
}
+
+func NewLexer(source string) *lexer {
+ l := &lexer{
+ data: source,
+ tokens: tokens(make([]Token, 0, 4)),
+ }
+ return l
+}
+
// Next returns the next token. Once an error has been recorded, every
// subsequent call returns an Error token (l.err is sticky).
func (l *lexer) Next() Token {
	if l.err != nil {
		return Token{Error, l.err.Error()}
	}
	if !l.tokens.empty() {
		return l.tokens.shift()
	}

	// Queue empty: lex more input, then retry. The recursion terminates
	// because fetchItem always makes progress (queues a token, records
	// an error, or consumes input).
	l.fetchItem()
	return l.Next()
}
+
// peek returns the next rune and its byte width without consuming it.
// At end of input it returns (eof, 0); on invalid UTF-8 it records an
// error and also returns (eof, 0).
// NOTE(review): a literal U+FFFD in the pattern is indistinguishable
// from invalid input here, since DecodeRuneInString returns RuneError
// for both.
func (l *lexer) peek() (r rune, w int) {
	if l.pos == len(l.data) {
		return eof, 0
	}

	r, w = utf8.DecodeRuneInString(l.data[l.pos:])
	if r == utf8.RuneError {
		l.errorf("could not read rune")
		r = eof
		w = 0
	}

	return
}
+
// read consumes and returns the next rune. If unread was called, the
// pushed-back rune is re-consumed first.
func (l *lexer) read() rune {
	if l.hasRune {
		// Re-consume the rune that unread pushed back.
		l.hasRune = false
		l.seek(l.lastRuneSize)
		return l.lastRune
	}

	r, s := l.peek()
	l.seek(s)

	// Remember the rune so unread can push it back.
	l.lastRune = r
	l.lastRuneSize = s

	return r
}
+
// seek advances the read position by w bytes (negative w rewinds).
func (l *lexer) seek(w int) {
	l.pos += w
}
+
// unread pushes the most recently read rune back onto the input.
// Only a single level of pushback is supported; a second consecutive
// unread records an error instead.
func (l *lexer) unread() {
	if l.hasRune {
		l.errorf("could not unread rune")
		return
	}
	l.seek(-l.lastRuneSize)
	l.hasRune = true
}
+
// errorf records a formatted error; Next reports it as an Error token.
func (l *lexer) errorf(f string, v ...interface{}) {
	l.err = fmt.Errorf(f, v...)
}
+
// inTerms reports whether the lexer is currently inside a {...} group.
func (l *lexer) inTerms() bool {
	return l.termsLevel > 0
}

// termsEnter records entering a {...} group.
func (l *lexer) termsEnter() {
	l.termsLevel++
}

// termsLeave records leaving a {...} group.
func (l *lexer) termsLeave() {
	l.termsLevel--
}
+
// Runes that terminate a run of literal text, outside and inside {...}.
// NOTE(review): the append relies on the inTextBreakers literal having
// no spare capacity (len == cap), so a fresh backing array is allocated
// and the two slices do not alias; fragile if the literal is extended.
var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
+
// fetchItem lexes one item from the input and queues the resulting
// token(s). It dispatches on the first rune; anything that is not a
// meta character is accumulated as literal text by fetchText.
func (l *lexer) fetchItem() {
	r := l.read()
	switch {
	case r == eof:
		l.tokens.push(Token{EOF, ""})

	case r == char_terms_open:
		l.termsEnter()
		l.tokens.push(Token{TermsOpen, string(r)})

	// ',' and '}' are special only inside a {...} group; elsewhere they
	// fall through to the default (literal text) case.
	case r == char_comma && l.inTerms():
		l.tokens.push(Token{Separator, string(r)})

	case r == char_terms_close && l.inTerms():
		l.tokens.push(Token{TermsClose, string(r)})
		l.termsLeave()

	case r == char_range_open:
		l.tokens.push(Token{RangeOpen, string(r)})
		l.fetchRange()

	case r == char_single:
		l.tokens.push(Token{Single, string(r)})

	case r == char_any:
		// "**" becomes a Super wildcard; a lone "*" becomes Any.
		if l.read() == char_any {
			l.tokens.push(Token{Super, string(r) + string(r)})
		} else {
			l.unread()
			l.tokens.push(Token{Any, string(r)})
		}

	default:
		l.unread()

		var breakers []rune
		if l.inTerms() {
			breakers = inTermsBreakers
		} else {
			breakers = inTextBreakers
		}
		l.fetchText(breakers)
	}
}
+
// fetchRange lexes the body of a [...] expression, queuing Not,
// RangeLo/RangeBetween/RangeHi (for "[a-z]") or Text (for "[abc]"),
// and finally RangeClose. Errors are recorded via errorf and surface
// from Next as Error tokens.
func (l *lexer) fetchRange() {
	var wantHi bool    // previous tokens were "lo-"; next rune is the hi bound
	var wantClose bool // range content consumed; only ']' is valid next
	var seenNot bool   // only the first '!' is treated as negation
	for {
		r := l.read()
		if r == eof {
			l.errorf("unexpected end of input")
			return
		}

		if wantClose {
			if r != char_range_close {
				l.errorf("expected close range character")
			} else {
				l.tokens.push(Token{RangeClose, string(r)})
			}
			return
		}

		if wantHi {
			l.tokens.push(Token{RangeHi, string(r)})
			wantClose = true
			continue
		}

		if !seenNot && r == char_range_not {
			l.tokens.push(Token{Not, string(r)})
			seenNot = true
			continue
		}

		// Lookahead for '-': "x-" means r is the lo bound of a range.
		if n, w := l.peek(); n == char_range_between {
			l.seek(w)
			l.tokens.push(Token{RangeLo, string(r)})
			l.tokens.push(Token{RangeBetween, string(n)})
			wantHi = true
			continue
		}

		l.unread() // unread first peek and fetch as text
		l.fetchText([]rune{char_range_close})
		wantClose = true
	}
}
+
// fetchText accumulates literal text until eof or an unescaped breaker
// rune, queuing a single Text token if anything was read. A backslash
// escapes the following rune; a trailing backslash at end of input is
// silently dropped.
func (l *lexer) fetchText(breakers []rune) {
	var data []rune
	var escaped bool

reading:
	for {
		r := l.read()
		if r == eof {
			break
		}

		if !escaped {
			if r == char_escape {
				escaped = true
				continue
			}

			if runes.IndexRune(breakers, r) != -1 {
				// Leave the breaker for the caller to handle.
				l.unread()
				break reading
			}
		}

		escaped = false
		data = append(data, r)
	}

	if len(data) > 0 {
		l.tokens.push(Token{Text, string(data)})
	}
}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
new file mode 100644
index 000000000..2797c4e83
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
@@ -0,0 +1,88 @@
+package lexer
+
+import "fmt"
+
// TokenType identifies the kind of a lexed glob token.
type TokenType int

const (
	EOF TokenType = iota
	Error
	Text
	Char
	Any
	Super
	Single
	Not
	Separator
	RangeOpen
	RangeClose
	RangeLo
	RangeHi
	RangeBetween
	TermsOpen
	TermsClose
)

// String returns the lowercase snake_case name of the token type, or
// "undef" for values outside the known range.
func (tt TokenType) String() string {
	names := [...]string{
		EOF:          "eof",
		Error:        "error",
		Text:         "text",
		Char:         "char",
		Any:          "any",
		Super:        "super",
		Single:       "single",
		Not:          "not",
		Separator:    "separator",
		RangeOpen:    "range_open",
		RangeClose:   "range_close",
		RangeLo:      "range_lo",
		RangeHi:      "range_hi",
		RangeBetween: "range_between",
		TermsOpen:    "terms_open",
		TermsClose:   "terms_close",
	}
	if tt < 0 || int(tt) >= len(names) {
		return "undef"
	}
	return names[tt]
}
+
// Token is a single lexical element: its type plus the raw text it was
// lexed from.
type Token struct {
	Type TokenType
	Raw  string
}

// String renders the token as type<"raw"> for debugging output.
func (t Token) String() string {
	return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
}
diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go
new file mode 100644
index 000000000..1d168b148
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/syntax.go
@@ -0,0 +1,14 @@
+package syntax
+
+import (
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/syntax/lexer"
+)
+
// Parse lexes and parses the glob pattern s into its AST.
func Parse(s string) (*ast.Node, error) {
	return ast.Parse(lexer.NewLexer(s))
}
+
// Special reports whether b is a glob meta character (see lexer.Special).
func Special(b byte) bool {
	return lexer.Special(b)
}
diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go
new file mode 100644
index 000000000..a72355641
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/runes/runes.go
@@ -0,0 +1,154 @@
+package runes
+
+func Index(s, needle []rune) int {
+ ls, ln := len(s), len(needle)
+
+ switch {
+ case ln == 0:
+ return 0
+ case ln == 1:
+ return IndexRune(s, needle[0])
+ case ln == ls:
+ if Equal(s, needle) {
+ return 0
+ }
+ return -1
+ case ln > ls:
+ return -1
+ }
+
+head:
+ for i := 0; i < ls && ls-i >= ln; i++ {
+ for y := 0; y < ln; y++ {
+ if s[i+y] != needle[y] {
+ continue head
+ }
+ }
+
+ return i
+ }
+
+ return -1
+}
+
// LastIndex returns the index of the last occurrence of needle in s, or
// -1 if needle is not present. Mirroring strings.LastIndex, an empty
// needle matches at len(s).
//
// Bug fix: the previous loop ran `for i := ls-1; i >= 0 && i >= ln; i--`
// over match END positions, so for 1 < len(needle) < len(s) it never
// examined a match starting at index 0 (e.g. LastIndex("abcd", "ab")
// wrongly returned -1). We now iterate over start positions down to 0.
func LastIndex(s, needle []rune) int {
	ls, ln := len(s), len(needle)

	switch {
	case ln == 0:
		if ls == 0 {
			return 0
		}
		return ls
	case ln > ls:
		return -1
	}

	// Try each possible start position from right to left.
search:
	for i := ls - ln; i >= 0; i-- {
		for y := 0; y < ln; y++ {
			if s[i+y] != needle[y] {
				continue search
			}
		}
		return i
	}

	return -1
}
+
// IndexAny returns the index of the first instance of any Unicode code point
// from chars in s, or -1 if no Unicode code point from chars is present in s.
func IndexAny(s, chars []rune) int {
	if len(chars) == 0 {
		return -1
	}
	for i, c := range s {
		for _, m := range chars {
			if c == m {
				return i
			}
		}
	}
	return -1
}
+
// Contains reports whether needle occurs as a contiguous sub-slice of s.
func Contains(s, needle []rune) bool {
	return Index(s, needle) >= 0
}
+
// Max returns the largest rune in s, or 0 when s is empty.
func Max(s []rune) rune {
	var best rune
	for _, r := range s {
		if r > best {
			best = r
		}
	}
	return best
}
+
// Min returns the smallest rune in s. For an empty s it returns the
// sentinel rune(-1), matching the original implementation.
func Min(s []rune) rune {
	min := rune(-1)
	for _, r := range s {
		if min == -1 || r < min {
			min = r
		}
	}
	return min
}
+
// IndexRune returns the index of the first occurrence of r in s, or -1.
func IndexRune(s []rune, r rune) int {
	for i := 0; i < len(s); i++ {
		if s[i] == r {
			return i
		}
	}
	return -1
}
+
// IndexLastRune returns the index of the last occurrence of r in s, or -1.
func IndexLastRune(s []rune, r rune) int {
	for i := len(s); i > 0; i-- {
		if s[i-1] == r {
			return i - 1
		}
	}
	return -1
}
+
// Equal reports whether a and b have the same length and contents.
func Equal(a, b []rune) bool {
	if len(a) != len(b) {
		return false
	}
	for i, r := range a {
		if r != b[i] {
			return false
		}
	}
	return true
}
+
// HasPrefix tests whether the string s begins with prefix.
func HasPrefix(s, prefix []rune) bool {
	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}
+
// HasSuffix tests whether the string s ends with suffix.
func HasSuffix(s, suffix []rune) bool {
	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}
diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go
new file mode 100644
index 000000000..e8ee1920b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/strings/strings.go
@@ -0,0 +1,39 @@
+package strings
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
// IndexAnyRunes returns the index in s of the first element of rs that
// occurs in s, or -1 if none does.
// NOTE(review): rs is scanned in order, so the result is the position of
// the first rs element found — not necessarily the leftmost occurrence
// of any rs element in s (unlike strings.IndexAny). Callers appear to
// rely on this behavior, so it is preserved.
func IndexAnyRunes(s string, rs []rune) int {
	for _, r := range rs {
		i := strings.IndexRune(s, r)
		if i >= 0 {
			return i
		}
	}

	return -1
}
+
// LastIndexAnyRunes returns the byte index in s of the LAST occurrence
// of the first rune from rs that is present in s, or -1 if none of rs
// occurs. Like IndexAnyRunes, rs is scanned in order and the first
// present rune wins.
//
// Bug fix: the previous multi-byte branch searched the full string s on
// every iteration (`strings.IndexRune(s, r)` instead of the shrinking
// suffix) and mixed relative with absolute offsets, so for runes >=
// utf8.RuneSelf it returned the FIRST occurrence rather than the last.
func LastIndexAnyRunes(s string, rs []rune) int {
	for _, r := range rs {
		i := -1
		if 0 <= r && r < utf8.RuneSelf {
			// Fast path: single-byte rune.
			i = strings.LastIndexByte(s, byte(r))
		} else {
			// Scan forward through the remaining suffix, remembering the
			// absolute offset of each occurrence.
			sub := s
			off := 0
			for {
				j := strings.IndexRune(sub, r)
				if j == -1 {
					break
				}
				i = off + j
				adv := j + utf8.RuneLen(r)
				if adv <= j {
					// Defensive: RuneLen can be -1 for an invalid rune;
					// always advance past the match position.
					adv = j + 1
				}
				off += adv
				sub = sub[adv:]
			}
		}
		if i != -1 {
			return i
		}
	}
	return -1
}
diff --git a/vendor/github.com/gocolly/colly/.codecov.yml b/vendor/github.com/gocolly/colly/.codecov.yml
new file mode 100644
index 000000000..69cb76019
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/.codecov.yml
@@ -0,0 +1 @@
+comment: false
diff --git a/vendor/github.com/gocolly/colly/.travis.yml b/vendor/github.com/gocolly/colly/.travis.yml
new file mode 100644
index 000000000..d72ef3847
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+sudo: false
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+script:
+ - go get -u golang.org/x/lint/golint
+ - OUT="$(go get -a)"; test -z "$OUT" || (echo "$OUT" && return 1)
+ - OUT="$(gofmt -l -d ./)"; test -z "$OUT" || (echo "$OUT" && return 1)
+ - OUT="$(golint ./...)"; test -z "$OUT" || (echo "$OUT" && return 1)
+ - go vet -v ./...
+ - go test -race -v -coverprofile=coverage.txt -covermode=atomic ./
+ - go build
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/gocolly/colly/CHANGELOG.md b/vendor/github.com/gocolly/colly/CHANGELOG.md
new file mode 100644
index 000000000..933d9eff1
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/CHANGELOG.md
@@ -0,0 +1,23 @@
+# 1.2.0 - 2019.02.13
+
+ - Compatibility with the latest htmlquery package
+ - New request shortcut for HEAD requests
+ - Check URL availability before visiting
+ - Fix proxy URL value
+ - Request counter fix
+ - Minor fixes in examples
+
+# 1.1.0 - 2018.08.13
+
+ - Appengine integration takes context.Context instead of http.Request (API change)
+ - Added "Accept" http header by default to every request
+ - Support slices of pointers in unmarshal
+ - Fixed a race condition in queues
+ - ForEachWithBreak method added to HTMLElement
+ - Added a local file example
+ - Support gzip decompression of response bodies
+ - Don't share waitgroup when cloning a collector
+ - Fixed instagram example
+
+
+# 1.0.0 - 2018.05.13
diff --git a/vendor/github.com/gocolly/colly/CONTRIBUTING.md b/vendor/github.com/gocolly/colly/CONTRIBUTING.md
new file mode 100644
index 000000000..17df63602
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contribute
+
+## Introduction
+
+First, thank you for considering contributing to colly! It's people like you that make the open source community such a great community! 😊
+
+We welcome any type of contribution, not only code. You can help with
+- **QA**: file bug reports, the more details you can give the better (e.g. screenshots with the console open)
+- **Marketing**: writing blog posts, howto's, printing stickers, ...
+- **Community**: presenting the project at meetups, organizing a dedicated meetup for the local community, ...
+- **Code**: take a look at the [open issues](https://github.com/gocolly/colly/issues). Even if you can't write code, commenting on them, showing that you care about a given issue matters. It helps us triage them.
+- **Money**: we welcome financial contributions in full transparency on our [open collective](https://opencollective.com/colly).
+
+## Your First Contribution
+
+Working on your first Pull Request? You can learn how from this *free* series, [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github).
+
+## Submitting code
+
+Any code change should be submitted as a pull request. The description should explain what the code does and give steps to execute it. The pull request should also contain tests.
+
+## Code review process
+
+The bigger the pull request, the longer it will take to review and merge. Try to break down large pull requests in smaller chunks that are easier to review and merge.
+It is also always helpful to have some context for your pull request. What was the purpose? Why does it matter to you?
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/colly).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+## Questions
+
+If you have any questions, create an [issue](https://github.com/gocolly/colly/issues/new) (protip: do a quick search first to see whether the same question has already been asked!).
+You can also reach us at hello@colly.opencollective.com.
+
+## Credits
+
+### Contributors
+
+Thank you to all the people who have already contributed to colly!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/colly#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/colly#sponsor))
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/gocolly/colly/LICENSE.txt b/vendor/github.com/gocolly/colly/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/gocolly/colly/README.md b/vendor/github.com/gocolly/colly/README.md
new file mode 100644
index 000000000..06e73cbea
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/README.md
@@ -0,0 +1,112 @@
+# Colly
+
+Lightning Fast and Elegant Scraping Framework for Gophers
+
+Colly provides a clean interface to write any kind of crawler/scraper/spider.
+
+With Colly you can easily extract structured data from websites, which can be used for a wide range of applications, like data mining, data processing or archiving.
+
+[![GoDoc](https://godoc.org/github.com/gocolly/colly?status.svg)](https://godoc.org/github.com/gocolly/colly)
+[![Backers on Open Collective](https://opencollective.com/colly/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/colly/sponsors/badge.svg)](#sponsors) [![build status](https://img.shields.io/travis/gocolly/colly/master.svg?style=flat-square)](https://travis-ci.org/gocolly/colly)
+[![report card](https://img.shields.io/badge/report%20card-a%2B-ff3333.svg?style=flat-square)](http://goreportcard.com/report/gocolly/colly)
+[![view examples](https://img.shields.io/badge/learn%20by-examples-0077b3.svg?style=flat-square)](https://github.com/gocolly/colly/tree/master/_examples)
+[![Code Coverage](https://img.shields.io/codecov/c/github/gocolly/colly/master.svg)](https://codecov.io/github/gocolly/colly?branch=master)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgocolly%2Fcolly.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgocolly%2Fcolly?ref=badge_shield)
+[![Twitter URL](https://img.shields.io/badge/twitter-follow-green.svg)](https://twitter.com/gocolly)
+
+
+## Features
+
+ * Clean API
+ * Fast (>1k request/sec on a single core)
+ * Manages request delays and maximum concurrency per domain
+ * Automatic cookie and session handling
+ * Sync/async/parallel scraping
+ * Caching
+ * Automatic encoding of non-unicode responses
+ * Robots.txt support
+ * Distributed scraping
+ * Configuration via environment variables
+ * Extensions
+
+
+## Example
+
+```go
+func main() {
+ c := colly.NewCollector()
+
+ // Find and visit all links
+ c.OnHTML("a[href]", func(e *colly.HTMLElement) {
+ e.Request.Visit(e.Attr("href"))
+ })
+
+ c.OnRequest(func(r *colly.Request) {
+ fmt.Println("Visiting", r.URL)
+ })
+
+ c.Visit("http://go-colly.org/")
+}
+```
+
+See [examples folder](https://github.com/gocolly/colly/tree/master/_examples) for more detailed examples.
+
+
+## Installation
+
+```
+go get -u github.com/gocolly/colly/...
+```
+
+
+## Bugs
+
+Bugs or suggestions? Visit the [issue tracker](https://github.com/gocolly/colly/issues) or join `#colly` on freenode
+
+
+## Other Projects Using Colly
+
+Below is a list of public, open source projects that use Colly:
+
+ * [greenpeace/check-my-pages](https://github.com/greenpeace/check-my-pages) Scraping script to test the Spanish Greenpeace web archive
+ * [altsab/gowap](https://github.com/altsab/gowap) Wappalyzer implementation in Go
+ * [jesuiscamille/goquotes](https://github.com/jesuiscamille/goquotes) A quotes scraper, making your day a little better!
+ * [jivesearch/jivesearch](https://github.com/jivesearch/jivesearch) A search engine that doesn't track you.
+ * [Leagify/colly-draft-prospects](https://github.com/Leagify/colly-draft-prospects) A scraper for future NFL Draft prospects.
+ * [lucasepe/go-ps4](https://github.com/lucasepe/go-ps4) Search playstation store for your favorite PS4 games using the command line.
+
+If you are using Colly in a project please send a pull request to add it to the list.
+
+## Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute]](CONTRIBUTING.md).
+
+
+
+## Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/colly#backer)]
+
+
+
+
+## Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/colly#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgocolly%2Fcolly.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgocolly%2Fcolly?ref=badge_large)
diff --git a/vendor/github.com/gocolly/colly/VERSION b/vendor/github.com/gocolly/colly/VERSION
new file mode 100644
index 000000000..26aaba0e8
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/VERSION
@@ -0,0 +1 @@
+1.2.0
diff --git a/vendor/github.com/gocolly/colly/colly.go b/vendor/github.com/gocolly/colly/colly.go
new file mode 100644
index 000000000..3fb64db78
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/colly.go
@@ -0,0 +1,1293 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package colly implements an HTTP scraping framework
+package colly
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/http/cookiejar"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/appengine/urlfetch"
+
+ "github.com/PuerkitoBio/goquery"
+ "github.com/antchfx/htmlquery"
+ "github.com/antchfx/xmlquery"
+ "github.com/kennygrant/sanitize"
+ "github.com/temoto/robotstxt"
+
+ "github.com/gocolly/colly/debug"
+ "github.com/gocolly/colly/storage"
+)
+
// Collector provides the scraper instance for a scraping job
type Collector struct {
	// UserAgent is the User-Agent string used by HTTP requests
	UserAgent string
	// MaxDepth limits the recursion depth of visited URLs.
	// Set it to 0 for infinite recursion (default).
	MaxDepth int
	// AllowedDomains is a domain whitelist.
	// Leave it blank to allow any domains to be visited
	AllowedDomains []string
	// DisallowedDomains is a domain blacklist.
	DisallowedDomains []string
	// DisallowedURLFilters is a list of regular expressions which restricts
	// visiting URLs. If any of the rules matches to a URL the
	// request will be stopped. DisallowedURLFilters will
	// be evaluated before URLFilters.
	// Leave it blank to allow any URLs to be visited
	DisallowedURLFilters []*regexp.Regexp
	// URLFilters is a list of regular expressions which restricts
	// visiting URLs. If any of the rules matches to a URL the
	// request won't be stopped. DisallowedURLFilters will
	// be evaluated before URLFilters.
	// Leave it blank to allow any URLs to be visited
	URLFilters []*regexp.Regexp

	// AllowURLRevisit allows multiple downloads of the same URL
	AllowURLRevisit bool
	// MaxBodySize is the limit of the retrieved response body in bytes.
	// 0 means unlimited.
	// The default value for MaxBodySize is 10MB (10 * 1024 * 1024 bytes).
	MaxBodySize int
	// CacheDir specifies a location where GET requests are cached as files.
	// When it's not defined, caching is disabled.
	CacheDir string
	// IgnoreRobotsTxt allows the Collector to ignore any restrictions set by
	// the target host's robots.txt file. See http://www.robotstxt.org/ for more
	// information.
	IgnoreRobotsTxt bool
	// Async turns on asynchronous network communication. Use Collector.Wait() to
	// be sure all requests have been finished.
	Async bool
	// ParseHTTPErrorResponse allows parsing HTTP responses with non 2xx status codes.
	// By default, Colly parses only successful HTTP responses. Set ParseHTTPErrorResponse
	// to true to enable it.
	ParseHTTPErrorResponse bool
	// ID is the unique identifier of a collector
	ID uint32
	// DetectCharset can enable character encoding detection for non-utf8 response bodies
	// without explicit charset declaration. This feature uses https://github.com/saintfish/chardet
	DetectCharset bool
	// RedirectHandler allows control on how a redirect will be managed
	RedirectHandler func(req *http.Request, via []*http.Request) error
	// CheckHead performs a HEAD request before every GET to pre-validate the response
	CheckHead bool
	// store persists visited-URL hashes and cookies (in-memory by default).
	store storage.Storage
	// debugger, when set, receives request/response/error events.
	debugger debug.Debugger
	// robotsMap caches parsed robots.txt data per host.
	robotsMap map[string]*robotstxt.RobotsData
	// htmlCallbacks and xmlCallbacks hold the selector/query -> callback
	// registrations made via OnHTML and OnXML.
	htmlCallbacks []*htmlCallbackContainer
	xmlCallbacks []*xmlCallbackContainer
	requestCallbacks []RequestCallback
	responseCallbacks []ResponseCallback
	errorCallbacks []ErrorCallback
	scrapedCallbacks []ScrapedCallback
	// requestCount and responseCount are updated with sync/atomic.
	requestCount uint32
	responseCount uint32
	// backend performs the actual HTTP traffic (rate limiting, caching).
	backend *httpBackend
	// wg tracks in-flight fetches so Wait() can block until completion.
	wg *sync.WaitGroup
	// lock guards the callback slices and robotsMap.
	lock *sync.RWMutex
}
+
// RequestCallback is a type alias for OnRequest callback functions
type RequestCallback func(*Request)

// ResponseCallback is a type alias for OnResponse callback functions
type ResponseCallback func(*Response)

// HTMLCallback is a type alias for OnHTML callback functions
type HTMLCallback func(*HTMLElement)

// XMLCallback is a type alias for OnXML callback functions
type XMLCallback func(*XMLElement)

// ErrorCallback is a type alias for OnError callback functions
type ErrorCallback func(*Response, error)

// ScrapedCallback is a type alias for OnScraped callback functions
type ScrapedCallback func(*Response)

// ProxyFunc is a type alias for proxy setter functions.
type ProxyFunc func(*http.Request) (*url.URL, error)

// htmlCallbackContainer pairs a goquery selector with its callback.
type htmlCallbackContainer struct {
	Selector string
	Function HTMLCallback
}

// xmlCallbackContainer pairs an xpath query with its callback.
type xmlCallbackContainer struct {
	Query    string
	Function XMLCallback
}

// cookieJarSerializer persists cookies through a storage.Storage backend.
type cookieJarSerializer struct {
	store storage.Storage
	lock  *sync.RWMutex
}

// collectorCounter is incremented atomically to hand out unique Collector IDs.
var collectorCounter uint32

// The key type is unexported to prevent collisions with context keys defined in
// other packages.
type key int

// ProxyURLKey is the context key for the request proxy address.
const ProxyURLKey key = iota
+
var (
	// ErrForbiddenDomain is the error thrown if visiting
	// a domain which is not allowed in AllowedDomains
	ErrForbiddenDomain = errors.New("Forbidden domain")
	// ErrMissingURL is the error type for missing URL errors
	ErrMissingURL = errors.New("Missing URL")
	// ErrMaxDepth is the error type for exceeding max depth
	ErrMaxDepth = errors.New("Max depth limit reached")
	// ErrForbiddenURL is the error thrown if visiting
	// a URL which is matched by one of the DisallowedURLFilters
	ErrForbiddenURL = errors.New("ForbiddenURL")

	// ErrNoURLFiltersMatch is the error thrown if visiting
	// a URL which is not allowed by URLFilters
	ErrNoURLFiltersMatch = errors.New("No URLFilters match")
	// ErrAlreadyVisited is the error type for already visited URLs
	ErrAlreadyVisited = errors.New("URL already visited")
	// ErrRobotsTxtBlocked is the error type for robots.txt errors
	ErrRobotsTxtBlocked = errors.New("URL blocked by robots.txt")
	// ErrNoCookieJar is the error type for missing cookie jar
	ErrNoCookieJar = errors.New("Cookie jar is not available")
	// ErrNoPattern is the error type for LimitRules without patterns
	ErrNoPattern = errors.New("No pattern defined in LimitRule")
)
+
+var envMap = map[string]func(*Collector, string){
+ "ALLOWED_DOMAINS": func(c *Collector, val string) {
+ c.AllowedDomains = strings.Split(val, ",")
+ },
+ "CACHE_DIR": func(c *Collector, val string) {
+ c.CacheDir = val
+ },
+ "DETECT_CHARSET": func(c *Collector, val string) {
+ c.DetectCharset = isYesString(val)
+ },
+ "DISABLE_COOKIES": func(c *Collector, _ string) {
+ c.backend.Client.Jar = nil
+ },
+ "DISALLOWED_DOMAINS": func(c *Collector, val string) {
+ c.DisallowedDomains = strings.Split(val, ",")
+ },
+ "IGNORE_ROBOTSTXT": func(c *Collector, val string) {
+ c.IgnoreRobotsTxt = isYesString(val)
+ },
+ "FOLLOW_REDIRECTS": func(c *Collector, val string) {
+ if !isYesString(val) {
+ c.RedirectHandler = func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ }
+ }
+ },
+ "MAX_BODY_SIZE": func(c *Collector, val string) {
+ size, err := strconv.Atoi(val)
+ if err == nil {
+ c.MaxBodySize = size
+ }
+ },
+ "MAX_DEPTH": func(c *Collector, val string) {
+ maxDepth, err := strconv.Atoi(val)
+ if err != nil {
+ c.MaxDepth = maxDepth
+ }
+ },
+ "PARSE_HTTP_ERROR_RESPONSE": func(c *Collector, val string) {
+ c.ParseHTTPErrorResponse = isYesString(val)
+ },
+ "USER_AGENT": func(c *Collector, val string) {
+ c.UserAgent = val
+ },
+}
+
+// NewCollector creates a new Collector instance with default configuration
+func NewCollector(options ...func(*Collector)) *Collector {
+ c := &Collector{}
+ c.Init()
+
+ for _, f := range options {
+ f(c)
+ }
+
+ c.parseSettingsFromEnv()
+
+ return c
+}
+
+// UserAgent sets the user agent used by the Collector.
+func UserAgent(ua string) func(*Collector) {
+ return func(c *Collector) {
+ c.UserAgent = ua
+ }
+}
+
+// MaxDepth limits the recursion depth of visited URLs.
+func MaxDepth(depth int) func(*Collector) {
+ return func(c *Collector) {
+ c.MaxDepth = depth
+ }
+}
+
+// AllowedDomains sets the domain whitelist used by the Collector.
+func AllowedDomains(domains ...string) func(*Collector) {
+ return func(c *Collector) {
+ c.AllowedDomains = domains
+ }
+}
+
+// ParseHTTPErrorResponse allows parsing responses with HTTP errors
+func ParseHTTPErrorResponse() func(*Collector) {
+ return func(c *Collector) {
+ c.ParseHTTPErrorResponse = true
+ }
+}
+
+// DisallowedDomains sets the domain blacklist used by the Collector.
+func DisallowedDomains(domains ...string) func(*Collector) {
+ return func(c *Collector) {
+ c.DisallowedDomains = domains
+ }
+}
+
+// DisallowedURLFilters sets the list of regular expressions which restricts
+// visiting URLs. If any of the rules matches to a URL the request will be stopped.
+func DisallowedURLFilters(filters ...*regexp.Regexp) func(*Collector) {
+ return func(c *Collector) {
+ c.DisallowedURLFilters = filters
+ }
+}
+
+// URLFilters sets the list of regular expressions which restricts
+// visiting URLs. If any of the rules matches to a URL the request won't be stopped.
+func URLFilters(filters ...*regexp.Regexp) func(*Collector) {
+ return func(c *Collector) {
+ c.URLFilters = filters
+ }
+}
+
+// AllowURLRevisit instructs the Collector to allow multiple downloads of the same URL
+func AllowURLRevisit() func(*Collector) {
+ return func(c *Collector) {
+ c.AllowURLRevisit = true
+ }
+}
+
+// MaxBodySize sets the limit of the retrieved response body in bytes.
+func MaxBodySize(sizeInBytes int) func(*Collector) {
+ return func(c *Collector) {
+ c.MaxBodySize = sizeInBytes
+ }
+}
+
+// CacheDir specifies the location where GET requests are cached as files.
+func CacheDir(path string) func(*Collector) {
+ return func(c *Collector) {
+ c.CacheDir = path
+ }
+}
+
+// IgnoreRobotsTxt instructs the Collector to ignore any restrictions
+// set by the target host's robots.txt file.
+func IgnoreRobotsTxt() func(*Collector) {
+ return func(c *Collector) {
+ c.IgnoreRobotsTxt = true
+ }
+}
+
+// ID sets the unique identifier of the Collector.
+func ID(id uint32) func(*Collector) {
+ return func(c *Collector) {
+ c.ID = id
+ }
+}
+
+// Async turns on asynchronous network requests.
+func Async(a bool) func(*Collector) {
+ return func(c *Collector) {
+ c.Async = a
+ }
+}
+
+// DetectCharset enables character encoding detection for non-utf8 response bodies
+// without explicit charset declaration. This feature uses https://github.com/saintfish/chardet
+func DetectCharset() func(*Collector) {
+ return func(c *Collector) {
+ c.DetectCharset = true
+ }
+}
+
+// Debugger sets the debugger used by the Collector.
+func Debugger(d debug.Debugger) func(*Collector) {
+ return func(c *Collector) {
+ d.Init()
+ c.debugger = d
+ }
+}
+
// Init initializes the Collector's private variables and sets default
// configuration for the Collector.
// Defaults established here: the colly User-Agent, unlimited depth,
// in-memory visit/cookie storage, a 10 MB body limit, a fresh cookie jar,
// robots.txt ignored, and a process-unique ID from the package counter.
func (c *Collector) Init() {
	c.UserAgent = "colly - https://github.com/gocolly/colly"
	c.MaxDepth = 0
	c.store = &storage.InMemoryStorage{}
	c.store.Init()
	c.MaxBodySize = 10 * 1024 * 1024
	c.backend = &httpBackend{}
	// cookiejar.New(nil) never returns an error; ignoring it is deliberate.
	jar, _ := cookiejar.New(nil)
	c.backend.Init(jar)
	c.backend.Client.CheckRedirect = c.checkRedirectFunc()
	c.wg = &sync.WaitGroup{}
	c.lock = &sync.RWMutex{}
	c.robotsMap = make(map[string]*robotstxt.RobotsData)
	c.IgnoreRobotsTxt = true
	c.ID = atomic.AddUint32(&collectorCounter, 1)
}
+
// Appengine will replace the Collector's backend http.Client
// with an http.Client that is provided by appengine/urlfetch.
// The jar, redirect policy and timeout of the existing client are
// carried over to the urlfetch client.
// This function should be used when the scraper is run on
// Google App Engine, before any Visit call. Example:
//  func startScraper(w http.ResponseWriter, r *http.Request) {
//    ctx := appengine.NewContext(r)
//    c := colly.NewCollector()
//    c.Appengine(ctx)
//     ...
//    c.Visit("https://google.ca")
//  }
func (c *Collector) Appengine(ctx context.Context) {
	client := urlfetch.Client(ctx)
	client.Jar = c.backend.Client.Jar
	client.CheckRedirect = c.backend.Client.CheckRedirect
	client.Timeout = c.backend.Client.Timeout

	c.backend.Client = client
}
+
// Visit starts Collector's collecting job by creating a
// request to the URL specified in parameter.
// Visit also calls the previously provided callbacks.
// When CheckHead is enabled, a HEAD request is issued first and its
// failure aborts the GET.
func (c *Collector) Visit(URL string) error {
	if c.CheckHead {
		if check := c.scrape(URL, "HEAD", 1, nil, nil, nil, true); check != nil {
			return check
		}
	}
	return c.scrape(URL, "GET", 1, nil, nil, nil, true)
}

// Head starts a collector job by creating a HEAD request.
// Unlike Visit, it never records the URL as visited (checkRevisit=false).
func (c *Collector) Head(URL string) error {
	return c.scrape(URL, "HEAD", 1, nil, nil, nil, false)
}

// Post starts a collector job by creating a POST request with the given
// form values. Post also calls the previously provided callbacks.
func (c *Collector) Post(URL string, requestData map[string]string) error {
	return c.scrape(URL, "POST", 1, createFormReader(requestData), nil, nil, true)
}

// PostRaw starts a collector job by creating a POST request with raw binary data.
// PostRaw also calls the previously provided callbacks.
func (c *Collector) PostRaw(URL string, requestData []byte) error {
	return c.scrape(URL, "POST", 1, bytes.NewReader(requestData), nil, nil, true)
}

// PostMultipart starts a collector job by creating a Multipart POST request
// with raw binary data. PostMultipart also calls the previously provided callbacks.
func (c *Collector) PostMultipart(URL string, requestData map[string][]byte) error {
	boundary := randomBoundary()
	hdr := http.Header{}
	hdr.Set("Content-Type", "multipart/form-data; boundary="+boundary)
	hdr.Set("User-Agent", c.UserAgent)
	return c.scrape(URL, "POST", 1, createMultipartReader(boundary, requestData), nil, hdr, true)
}
+
// Request starts a collector job by creating a custom HTTP request
// where method, context, headers and request data can be specified.
// Set requestData, ctx, hdr parameters to nil if you don't want to use them.
// Valid methods:
//   - "GET"
//   - "HEAD"
//   - "POST"
//   - "PUT"
//   - "DELETE"
//   - "PATCH"
//   - "OPTIONS"
func (c *Collector) Request(method, URL string, requestData io.Reader, ctx *Context, hdr http.Header) error {
	return c.scrape(URL, method, 1, requestData, ctx, hdr, true)
}

// SetDebugger attaches a debugger to the collector.
// The debugger's Init is called before it receives any events.
func (c *Collector) SetDebugger(d debug.Debugger) {
	d.Init()
	c.debugger = d
}

// UnmarshalRequest creates a Request from serialized data
// (presumably produced by Request.Marshal, defined elsewhere in the
// package — the JSON layout is serializableRequest).
func (c *Collector) UnmarshalRequest(r []byte) (*Request, error) {
	req := &serializableRequest{}
	err := json.Unmarshal(r, req)
	if err != nil {
		return nil, err
	}

	u, err := url.Parse(req.URL)
	if err != nil {
		return nil, err
	}

	ctx := NewContext()
	for k, v := range req.Ctx {
		ctx.Put(k, v)
	}

	return &Request{
		Method:    req.Method,
		URL:       u,
		Body:      bytes.NewReader(req.Body),
		Ctx:       ctx,
		ID:        atomic.AddUint32(&c.requestCount, 1),
		Headers:   &req.Headers,
		collector: c,
	}, nil
}
+
// scrape is the single entry point behind Visit/Head/Post/Request: it
// validates the URL against the collector's policies, builds the
// *http.Request and dispatches it to fetch — on a new goroutine when
// Async is enabled.
func (c *Collector) scrape(u, method string, depth int, requestData io.Reader, ctx *Context, hdr http.Header, checkRevisit bool) error {
	if err := c.requestCheck(u, method, depth, checkRevisit); err != nil {
		return err
	}
	parsedURL, err := url.Parse(u)
	if err != nil {
		return err
	}
	// Scheme-less URLs default to plain HTTP.
	if parsedURL.Scheme == "" {
		parsedURL.Scheme = "http"
	}
	if !c.isDomainAllowed(parsedURL.Host) {
		return ErrForbiddenDomain
	}
	// robots.txt applies to everything except HEAD probes, unless disabled.
	if method != "HEAD" && !c.IgnoreRobotsTxt {
		if err = c.checkRobots(parsedURL); err != nil {
			return err
		}
	}
	if hdr == nil {
		hdr = http.Header{"User-Agent": []string{c.UserAgent}}
	}
	// http.Request.Body must be a ReadCloser; wrap plain readers.
	rc, ok := requestData.(io.ReadCloser)
	if !ok && requestData != nil {
		rc = ioutil.NopCloser(requestData)
	}
	req := &http.Request{
		Method:     method,
		URL:        parsedURL,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     hdr,
		Body:       rc,
		Host:       parsedURL.Host,
	}
	// Populate ContentLength/GetBody for replayable body types so
	// redirects can resend the body.
	setRequestBody(req, requestData)
	u = parsedURL.String()
	// The matching Done is deferred inside fetch; Add must happen before
	// the goroutine is spawned so Wait cannot race past it.
	c.wg.Add(1)
	if c.Async {
		go c.fetch(u, method, depth, requestData, ctx, hdr, req)
		return nil
	}
	return c.fetch(u, method, depth, requestData, ctx, hdr, req)
}
+
+func setRequestBody(req *http.Request, body io.Reader) {
+ if body != nil {
+ switch v := body.(type) {
+ case *bytes.Buffer:
+ req.ContentLength = int64(v.Len())
+ buf := v.Bytes()
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := bytes.NewReader(buf)
+ return ioutil.NopCloser(r), nil
+ }
+ case *bytes.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return ioutil.NopCloser(&r), nil
+ }
+ case *strings.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return ioutil.NopCloser(&r), nil
+ }
+ }
+ if req.GetBody != nil && req.ContentLength == 0 {
+ req.Body = http.NoBody
+ req.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
+ }
+ }
+}
+
// fetch executes one prepared request end-to-end: OnRequest callbacks,
// the (possibly cached) HTTP round trip, charset normalization, then the
// OnResponse, OnHTML, OnXML and OnScraped callback phases in that order.
// It always releases the WaitGroup slot taken in scrape.
func (c *Collector) fetch(u, method string, depth int, requestData io.Reader, ctx *Context, hdr http.Header, req *http.Request) error {
	defer c.wg.Done()
	if ctx == nil {
		ctx = NewContext()
	}
	request := &Request{
		URL:       req.URL,
		Headers:   &req.Header,
		Ctx:       ctx,
		Depth:     depth,
		Method:    method,
		Body:      requestData,
		collector: c,
		ID:        atomic.AddUint32(&c.requestCount, 1),
	}

	c.handleOnRequest(request)

	// An OnRequest callback may call Request.Abort; treat that as a no-op.
	if request.abort {
		return nil
	}

	// Default headers, applied only when the caller did not set them.
	if method == "POST" && req.Header.Get("Content-Type") == "" {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	}

	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "*/*")
	}

	// backend.Cache performs the round trip, using CacheDir when configured.
	origURL := req.URL
	response, err := c.backend.Cache(req, c.MaxBodySize, c.CacheDir)
	if proxyURL, ok := req.Context().Value(ProxyURLKey).(string); ok {
		request.ProxyURL = proxyURL
	}
	if err := c.handleOnError(response, err, request, ctx); err != nil {
		return err
	}
	// Redirects may have rewritten req.URL; expose the final URL/headers.
	if req.URL != origURL {
		request.URL = req.URL
		request.Headers = &req.Header
	}
	atomic.AddUint32(&c.responseCount, 1)
	response.Ctx = ctx
	response.Request = request

	err = response.fixCharset(c.DetectCharset, request.ResponseCharacterEncoding)
	if err != nil {
		return err
	}

	c.handleOnResponse(response)

	// HTML/XML parse failures are routed to OnError but do not stop the
	// remaining phases; the last error is returned at the end.
	err = c.handleOnHTML(response)
	if err != nil {
		c.handleOnError(response, err, request, ctx)
	}

	err = c.handleOnXML(response)
	if err != nil {
		c.handleOnError(response, err, request, ctx)
	}

	c.handleOnScraped(response)

	return err
}
+
+func (c *Collector) requestCheck(u, method string, depth int, checkRevisit bool) error {
+ if u == "" {
+ return ErrMissingURL
+ }
+ if c.MaxDepth > 0 && c.MaxDepth < depth {
+ return ErrMaxDepth
+ }
+ if len(c.DisallowedURLFilters) > 0 {
+ if isMatchingFilter(c.DisallowedURLFilters, []byte(u)) {
+ return ErrForbiddenURL
+ }
+ }
+ if len(c.URLFilters) > 0 {
+ if !isMatchingFilter(c.URLFilters, []byte(u)) {
+ return ErrNoURLFiltersMatch
+ }
+ }
+ if checkRevisit && !c.AllowURLRevisit && method == "GET" {
+ h := fnv.New64a()
+ h.Write([]byte(u))
+ uHash := h.Sum64()
+ visited, err := c.store.IsVisited(uHash)
+ if err != nil {
+ return err
+ }
+ if visited {
+ return ErrAlreadyVisited
+ }
+ return c.store.Visited(uHash)
+ }
+ return nil
+}
+
+func (c *Collector) isDomainAllowed(domain string) bool {
+ for _, d2 := range c.DisallowedDomains {
+ if d2 == domain {
+ return false
+ }
+ }
+ if c.AllowedDomains == nil || len(c.AllowedDomains) == 0 {
+ return true
+ }
+ for _, d2 := range c.AllowedDomains {
+ if d2 == domain {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Collector) checkRobots(u *url.URL) error {
+ c.lock.RLock()
+ robot, ok := c.robotsMap[u.Host]
+ c.lock.RUnlock()
+
+ if !ok {
+ // no robots file cached
+ resp, err := c.backend.Client.Get(u.Scheme + "://" + u.Host + "/robots.txt")
+ if err != nil {
+ return err
+ }
+ robot, err = robotstxt.FromResponse(resp)
+ if err != nil {
+ return err
+ }
+ c.lock.Lock()
+ c.robotsMap[u.Host] = robot
+ c.lock.Unlock()
+ }
+
+ uaGroup := robot.FindGroup(c.UserAgent)
+ if uaGroup == nil {
+ return nil
+ }
+
+ if !uaGroup.Test(u.EscapedPath()) {
+ return ErrRobotsTxtBlocked
+ }
+ return nil
+}
+
+// String is the text representation of the collector.
+// It contains useful debug information about the collector's internals
+func (c *Collector) String() string {
+ return fmt.Sprintf(
+ "Requests made: %d (%d responses) | Callbacks: OnRequest: %d, OnHTML: %d, OnResponse: %d, OnError: %d",
+ c.requestCount,
+ c.responseCount,
+ len(c.requestCallbacks),
+ len(c.htmlCallbacks),
+ len(c.responseCallbacks),
+ len(c.errorCallbacks),
+ )
+}
+
+// Wait returns when the collector jobs are finished
+func (c *Collector) Wait() {
+ c.wg.Wait()
+}
+
// OnRequest registers a function. Function will be executed on every
// request made by the Collector.
func (c *Collector) OnRequest(f RequestCallback) {
	c.lock.Lock()
	if c.requestCallbacks == nil {
		c.requestCallbacks = make([]RequestCallback, 0, 4)
	}
	c.requestCallbacks = append(c.requestCallbacks, f)
	c.lock.Unlock()
}

// OnResponse registers a function. Function will be executed on every response.
func (c *Collector) OnResponse(f ResponseCallback) {
	c.lock.Lock()
	if c.responseCallbacks == nil {
		c.responseCallbacks = make([]ResponseCallback, 0, 4)
	}
	c.responseCallbacks = append(c.responseCallbacks, f)
	c.lock.Unlock()
}

// OnHTML registers a function. Function will be executed on every HTML
// element matched by the GoQuery Selector parameter.
// GoQuery Selector is a selector used by https://github.com/PuerkitoBio/goquery
func (c *Collector) OnHTML(goquerySelector string, f HTMLCallback) {
	c.lock.Lock()
	if c.htmlCallbacks == nil {
		c.htmlCallbacks = make([]*htmlCallbackContainer, 0, 4)
	}
	c.htmlCallbacks = append(c.htmlCallbacks, &htmlCallbackContainer{
		Selector: goquerySelector,
		Function: f,
	})
	c.lock.Unlock()
}

// OnXML registers a function. Function will be executed on every XML
// element matched by the xpath Query parameter.
// xpath Query is used by https://github.com/antchfx/xmlquery
func (c *Collector) OnXML(xpathQuery string, f XMLCallback) {
	c.lock.Lock()
	if c.xmlCallbacks == nil {
		c.xmlCallbacks = make([]*xmlCallbackContainer, 0, 4)
	}
	c.xmlCallbacks = append(c.xmlCallbacks, &xmlCallbackContainer{
		Query:    xpathQuery,
		Function: f,
	})
	c.lock.Unlock()
}

// OnHTMLDetach deregisters a function. The function will not be executed
// after it is detached. Only the first callback registered with the exact
// same selector string is removed.
func (c *Collector) OnHTMLDetach(goquerySelector string) {
	c.lock.Lock()
	deleteIdx := -1
	for i, cc := range c.htmlCallbacks {
		if cc.Selector == goquerySelector {
			deleteIdx = i
			break
		}
	}
	if deleteIdx != -1 {
		c.htmlCallbacks = append(c.htmlCallbacks[:deleteIdx], c.htmlCallbacks[deleteIdx+1:]...)
	}
	c.lock.Unlock()
}

// OnXMLDetach deregisters a function. The function will not be executed
// after it is detached. Only the first callback registered with the exact
// same query string is removed.
func (c *Collector) OnXMLDetach(xpathQuery string) {
	c.lock.Lock()
	deleteIdx := -1
	for i, cc := range c.xmlCallbacks {
		if cc.Query == xpathQuery {
			deleteIdx = i
			break
		}
	}
	if deleteIdx != -1 {
		c.xmlCallbacks = append(c.xmlCallbacks[:deleteIdx], c.xmlCallbacks[deleteIdx+1:]...)
	}
	c.lock.Unlock()
}

// OnError registers a function. Function will be executed if an error
// occurs during the HTTP request.
func (c *Collector) OnError(f ErrorCallback) {
	c.lock.Lock()
	if c.errorCallbacks == nil {
		c.errorCallbacks = make([]ErrorCallback, 0, 4)
	}
	c.errorCallbacks = append(c.errorCallbacks, f)
	c.lock.Unlock()
}

// OnScraped registers a function. Function will be executed after
// OnHTML, as a final part of the scraping.
func (c *Collector) OnScraped(f ScrapedCallback) {
	c.lock.Lock()
	if c.scrapedCallbacks == nil {
		c.scrapedCallbacks = make([]ScrapedCallback, 0, 4)
	}
	c.scrapedCallbacks = append(c.scrapedCallbacks, f)
	c.lock.Unlock()
}
+
// WithTransport allows you to set a custom http.RoundTripper (transport).
func (c *Collector) WithTransport(transport http.RoundTripper) {
	c.backend.Client.Transport = transport
}

// DisableCookies turns off cookie handling.
func (c *Collector) DisableCookies() {
	c.backend.Client.Jar = nil
}

// SetCookieJar overrides the previously set cookie jar.
func (c *Collector) SetCookieJar(j *cookiejar.Jar) {
	c.backend.Client.Jar = j
}

// SetRequestTimeout overrides the default timeout (10 seconds) for this collector.
func (c *Collector) SetRequestTimeout(timeout time.Duration) {
	c.backend.Client.Timeout = timeout
}

// SetStorage overrides the default in-memory storage.
// Storage stores scraping related data like cookies and visited urls.
// The storage's Init error, if any, is returned before anything is replaced.
func (c *Collector) SetStorage(s storage.Storage) error {
	if err := s.Init(); err != nil {
		return err
	}
	c.store = s
	// The cookie jar is rebuilt on top of the new storage backend.
	c.backend.Client.Jar = createJar(s)
	return nil
}
+
+// SetProxy sets a proxy for the collector. This method overrides the previously
+// used http.Transport if the type of the transport is not http.RoundTripper.
+// The proxy type is determined by the URL scheme. "http"
+// and "socks5" are supported. If the scheme is empty,
+// "http" is assumed.
+func (c *Collector) SetProxy(proxyURL string) error {
+ proxyParsed, err := url.Parse(proxyURL)
+ if err != nil {
+ return err
+ }
+
+ c.SetProxyFunc(http.ProxyURL(proxyParsed))
+
+ return nil
+}
+
+// SetProxyFunc sets a custom proxy setter/switcher function.
+// See built-in ProxyFuncs for more details.
+// This method overrides the previously used http.Transport
+// if the type of the transport is not http.RoundTripper.
+// The proxy type is determined by the URL scheme. "http"
+// and "socks5" are supported. If the scheme is empty,
+// "http" is assumed.
+func (c *Collector) SetProxyFunc(p ProxyFunc) {
+ t, ok := c.backend.Client.Transport.(*http.Transport)
+ if c.backend.Client.Transport != nil && ok {
+ t.Proxy = p
+ } else {
+ c.backend.Client.Transport = &http.Transport{
+ Proxy: p,
+ }
+ }
+}
+
// createEvent assembles a debug.Event for the collector's debugger.
func createEvent(eventType string, requestID, collectorID uint32, kvargs map[string]string) *debug.Event {
	return &debug.Event{
		CollectorID: collectorID,
		RequestID:   requestID,
		Type:        eventType,
		Values:      kvargs,
	}
}

// handleOnRequest emits a "request" debug event (when a debugger is set)
// and runs the registered OnRequest callbacks in registration order.
func (c *Collector) handleOnRequest(r *Request) {
	if c.debugger != nil {
		c.debugger.Event(createEvent("request", r.ID, c.ID, map[string]string{
			"url": r.URL.String(),
		}))
	}
	for _, f := range c.requestCallbacks {
		f(r)
	}
}

// handleOnResponse emits a "response" debug event (when a debugger is set)
// and runs the registered OnResponse callbacks in registration order.
func (c *Collector) handleOnResponse(r *Response) {
	if c.debugger != nil {
		c.debugger.Event(createEvent("response", r.Request.ID, c.ID, map[string]string{
			"url":    r.Request.URL.String(),
			"status": http.StatusText(r.StatusCode),
		}))
	}
	for _, f := range c.responseCallbacks {
		f(r)
	}
}
+
// handleOnHTML parses an HTML response and runs every registered OnHTML
// callback against the elements its selector matches. Responses whose
// Content-Type does not contain "html" are skipped, as is everything
// when no callbacks are registered.
func (c *Collector) handleOnHTML(resp *Response) error {
	if len(c.htmlCallbacks) == 0 || !strings.Contains(strings.ToLower(resp.Headers.Get("Content-Type")), "html") {
		return nil
	}
	doc, err := goquery.NewDocumentFromReader(bytes.NewBuffer(resp.Body))
	if err != nil {
		return err
	}
	// A <base href> tag, when present, rebases relative URLs for this request.
	if href, found := doc.Find("base[href]").Attr("href"); found {
		resp.Request.baseURL, _ = url.Parse(href)
	}
	for _, cc := range c.htmlCallbacks {
		// i is a running index across ALL nodes matched by this selector.
		i := 0
		doc.Find(cc.Selector).Each(func(_ int, s *goquery.Selection) {
			for _, n := range s.Nodes {
				e := NewHTMLElementFromSelectionNode(resp, s, n, i)
				i++
				if c.debugger != nil {
					c.debugger.Event(createEvent("html", resp.Request.ID, c.ID, map[string]string{
						"selector": cc.Selector,
						"url":      resp.Request.URL.String(),
					}))
				}
				cc.Function(e)
			}
		})
	}
	return nil
}
+
// handleOnXML runs the registered OnXML callbacks. HTML responses are
// parsed with htmlquery and XML responses with xmlquery; anything whose
// Content-Type contains neither "html" nor "xml" is skipped. "html" takes
// precedence when a Content-Type mentions both.
func (c *Collector) handleOnXML(resp *Response) error {
	if len(c.xmlCallbacks) == 0 {
		return nil
	}
	contentType := strings.ToLower(resp.Headers.Get("Content-Type"))
	if !strings.Contains(contentType, "html") && !strings.Contains(contentType, "xml") {
		return nil
	}

	if strings.Contains(contentType, "html") {
		doc, err := htmlquery.Parse(bytes.NewBuffer(resp.Body))
		if err != nil {
			return err
		}
		// A <base href> tag, when present, rebases relative URLs for this request.
		if e := htmlquery.FindOne(doc, "//base"); e != nil {
			for _, a := range e.Attr {
				if a.Key == "href" {
					resp.Request.baseURL, _ = url.Parse(a.Val)
					break
				}
			}
		}

		for _, cc := range c.xmlCallbacks {
			for _, n := range htmlquery.Find(doc, cc.Query) {
				e := NewXMLElementFromHTMLNode(resp, n)
				if c.debugger != nil {
					c.debugger.Event(createEvent("xml", resp.Request.ID, c.ID, map[string]string{
						"selector": cc.Query,
						"url":      resp.Request.URL.String(),
					}))
				}
				cc.Function(e)
			}
		}
	} else if strings.Contains(contentType, "xml") {
		doc, err := xmlquery.Parse(bytes.NewBuffer(resp.Body))
		if err != nil {
			return err
		}

		for _, cc := range c.xmlCallbacks {
			xmlquery.FindEach(doc, cc.Query, func(i int, n *xmlquery.Node) {
				e := NewXMLElementFromXMLNode(resp, n)
				if c.debugger != nil {
					c.debugger.Event(createEvent("xml", resp.Request.ID, c.ID, map[string]string{
						"selector": cc.Query,
						"url":      resp.Request.URL.String(),
					}))
				}
				cc.Function(e)
			})
		}
	}
	return nil
}
+
// handleOnError normalizes transport errors and HTTP error statuses into
// a single error, emits an "error" debug event and runs the OnError
// callbacks. It returns nil when the response counts as successful.
// NOTE(review): the success cutoff is StatusCode < 203 rather than < 300,
// so 2xx codes above 202 are treated as errors — confirm this is intended.
func (c *Collector) handleOnError(response *Response, err error, request *Request, ctx *Context) error {
	// When err != nil, response may be nil; the err == nil operands below
	// short-circuit before response.StatusCode would be dereferenced.
	if err == nil && (c.ParseHTTPErrorResponse || response.StatusCode < 203) {
		return nil
	}
	if err == nil && response.StatusCode >= 203 {
		err = errors.New(http.StatusText(response.StatusCode))
	}
	// Synthesize a minimal response so callbacks always receive one.
	if response == nil {
		response = &Response{
			Request: request,
			Ctx:     ctx,
		}
	}
	if c.debugger != nil {
		c.debugger.Event(createEvent("error", request.ID, c.ID, map[string]string{
			"url":    request.URL.String(),
			"status": http.StatusText(response.StatusCode),
		}))
	}
	if response.Request == nil {
		response.Request = request
	}
	if response.Ctx == nil {
		response.Ctx = request.Ctx
	}
	for _, f := range c.errorCallbacks {
		f(response, err)
	}
	return err
}
+
// handleOnScraped emits a "scraped" debug event (when a debugger is set)
// and runs the registered OnScraped callbacks in registration order.
func (c *Collector) handleOnScraped(r *Response) {
	if c.debugger != nil {
		c.debugger.Event(createEvent("scraped", r.Request.ID, c.ID, map[string]string{
			"url": r.Request.URL.String(),
		}))
	}
	for _, f := range c.scrapedCallbacks {
		f(r)
	}
}

// Limit adds a new LimitRule to the collector.
func (c *Collector) Limit(rule *LimitRule) error {
	return c.backend.Limit(rule)
}

// Limits adds new LimitRules to the collector.
func (c *Collector) Limits(rules []*LimitRule) error {
	return c.backend.Limits(rules)
}
+
// SetCookies handles the receipt of the cookies in a reply for the given URL.
// It returns ErrNoCookieJar when cookies have been disabled.
func (c *Collector) SetCookies(URL string, cookies []*http.Cookie) error {
	if c.backend.Client.Jar == nil {
		return ErrNoCookieJar
	}
	u, err := url.Parse(URL)
	if err != nil {
		return err
	}
	c.backend.Client.Jar.SetCookies(u, cookies)
	return nil
}

// Cookies returns the cookies to send in a request for the given URL.
// It returns nil when cookies are disabled or the URL cannot be parsed.
func (c *Collector) Cookies(URL string) []*http.Cookie {
	if c.backend.Client.Jar == nil {
		return nil
	}
	u, err := url.Parse(URL)
	if err != nil {
		return nil
	}
	return c.backend.Client.Jar.Cookies(u)
}
+
// Clone creates an exact copy of a Collector without callbacks.
// HTTP backend, robots.txt cache, storage, lock, debugger and cookie jar
// are shared between collectors; the callback lists start empty and the
// clone receives a fresh ID and its own WaitGroup.
func (c *Collector) Clone() *Collector {
	return &Collector{
		AllowedDomains:         c.AllowedDomains,
		AllowURLRevisit:        c.AllowURLRevisit,
		CacheDir:               c.CacheDir,
		DetectCharset:          c.DetectCharset,
		DisallowedDomains:      c.DisallowedDomains,
		ID:                     atomic.AddUint32(&collectorCounter, 1),
		IgnoreRobotsTxt:        c.IgnoreRobotsTxt,
		MaxBodySize:            c.MaxBodySize,
		MaxDepth:               c.MaxDepth,
		DisallowedURLFilters:   c.DisallowedURLFilters,
		URLFilters:             c.URLFilters,
		ParseHTTPErrorResponse: c.ParseHTTPErrorResponse,
		UserAgent:              c.UserAgent,
		store:                  c.store,
		backend:                c.backend,
		debugger:               c.debugger,
		Async:                  c.Async,
		RedirectHandler:        c.RedirectHandler,
		errorCallbacks:         make([]ErrorCallback, 0, 8),
		htmlCallbacks:          make([]*htmlCallbackContainer, 0, 8),
		xmlCallbacks:           make([]*xmlCallbackContainer, 0, 8),
		scrapedCallbacks:       make([]ScrapedCallback, 0, 8),
		lock:                   c.lock,
		requestCallbacks:       make([]RequestCallback, 0, 8),
		responseCallbacks:      make([]ResponseCallback, 0, 8),
		robotsMap:              c.robotsMap,
		wg:                     &sync.WaitGroup{},
	}
}
+
+func (c *Collector) checkRedirectFunc() func(req *http.Request, via []*http.Request) error {
+ return func(req *http.Request, via []*http.Request) error {
+ if !c.isDomainAllowed(req.URL.Host) {
+ return fmt.Errorf("Not following redirect to %s because its not in AllowedDomains", req.URL.Host)
+ }
+
+ if c.RedirectHandler != nil {
+ return c.RedirectHandler(req, via)
+ }
+
+		// Honor Go's default maximum of 10 redirects
+ if len(via) >= 10 {
+ return http.ErrUseLastResponse
+ }
+
+ lastRequest := via[len(via)-1]
+
+ // Copy the headers from last request
+ for hName, hValues := range lastRequest.Header {
+ for _, hValue := range hValues {
+ req.Header.Set(hName, hValue)
+ }
+ }
+
+ // If domain has changed, remove the Authorization-header if it exists
+ if req.URL.Host != lastRequest.URL.Host {
+ req.Header.Del("Authorization")
+ }
+
+ return nil
+ }
+}
+
+func (c *Collector) parseSettingsFromEnv() {
+ for _, e := range os.Environ() {
+ if !strings.HasPrefix(e, "COLLY_") {
+ continue
+ }
+ pair := strings.SplitN(e[6:], "=", 2)
+ if f, ok := envMap[pair[0]]; ok {
+ f(c, pair[1])
+ } else {
+ log.Println("Unknown environment variable:", pair[0])
+ }
+ }
+}
+
+// SanitizeFileName replaces dangerous characters in a string
+// so the return value can be used as a safe file name.
+func SanitizeFileName(fileName string) string {
+ ext := filepath.Ext(fileName)
+ cleanExt := sanitize.BaseName(ext)
+ if cleanExt == "" {
+ cleanExt = ".unknown"
+ }
+ return strings.Replace(fmt.Sprintf(
+ "%s.%s",
+ sanitize.BaseName(fileName[:len(fileName)-len(ext)]),
+ cleanExt[1:],
+ ), "-", "_", -1)
+}
+
+func createFormReader(data map[string]string) io.Reader {
+ form := url.Values{}
+ for k, v := range data {
+ form.Add(k, v)
+ }
+ return strings.NewReader(form.Encode())
+}
+
+func createMultipartReader(boundary string, data map[string][]byte) io.Reader {
+ dashBoundary := "--" + boundary
+
+ body := []byte{}
+ buffer := bytes.NewBuffer(body)
+
+ buffer.WriteString("Content-type: multipart/form-data; boundary=" + boundary + "\n\n")
+ for contentType, content := range data {
+ buffer.WriteString(dashBoundary + "\n")
+ buffer.WriteString("Content-Disposition: form-data; name=" + contentType + "\n")
+ buffer.WriteString(fmt.Sprintf("Content-Length: %d \n\n", len(content)))
+ buffer.Write(content)
+ buffer.WriteString("\n")
+ }
+ buffer.WriteString(dashBoundary + "--\n\n")
+ return buffer
+}
+
+// randomBoundary was borrowed from
+// github.com/golang/go/mime/multipart/writer.go#randomBoundary
+func randomBoundary() string {
+ var buf [30]byte
+ _, err := io.ReadFull(rand.Reader, buf[:])
+ if err != nil {
+ panic(err)
+ }
+ return fmt.Sprintf("%x", buf[:])
+}
+
+func isYesString(s string) bool {
+ switch strings.ToLower(s) {
+ case "1", "yes", "true", "y":
+ return true
+ }
+ return false
+}
+
+func createJar(s storage.Storage) http.CookieJar {
+ return &cookieJarSerializer{store: s, lock: &sync.RWMutex{}}
+}
+
+func (j *cookieJarSerializer) SetCookies(u *url.URL, cookies []*http.Cookie) {
+ j.lock.Lock()
+ defer j.lock.Unlock()
+ cookieStr := j.store.Cookies(u)
+
+ // Merge existing cookies, new cookies have precedence.
+ cnew := make([]*http.Cookie, len(cookies))
+ copy(cnew, cookies)
+ existing := storage.UnstringifyCookies(cookieStr)
+ for _, c := range existing {
+ if !storage.ContainsCookie(cnew, c.Name) {
+ cnew = append(cnew, c)
+ }
+ }
+ j.store.SetCookies(u, storage.StringifyCookies(cnew))
+}
+
+func (j *cookieJarSerializer) Cookies(u *url.URL) []*http.Cookie {
+ cookies := storage.UnstringifyCookies(j.store.Cookies(u))
+ // Filter.
+ now := time.Now()
+ cnew := make([]*http.Cookie, 0, len(cookies))
+ for _, c := range cookies {
+ // Drop expired cookies.
+ if c.RawExpires != "" && c.Expires.Before(now) {
+ continue
+ }
+ // Drop secure cookies if not over https.
+ if c.Secure && u.Scheme != "https" {
+ continue
+ }
+ cnew = append(cnew, c)
+ }
+ return cnew
+}
+
+func isMatchingFilter(fs []*regexp.Regexp, d []byte) bool {
+ for _, r := range fs {
+ if r.Match(d) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/gocolly/colly/context.go b/vendor/github.com/gocolly/colly/context.go
new file mode 100644
index 000000000..4bc11b95e
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/context.go
@@ -0,0 +1,87 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "sync"
+)
+
+// Context provides a tiny layer for passing data between callbacks
+type Context struct {
+ contextMap map[string]interface{}
+ lock *sync.RWMutex
+}
+
+// NewContext initializes a new Context instance
+func NewContext() *Context {
+ return &Context{
+ contextMap: make(map[string]interface{}),
+ lock: &sync.RWMutex{},
+ }
+}
+
+// UnmarshalBinary decodes Context value to nil
+// This function is used by request caching
+func (c *Context) UnmarshalBinary(_ []byte) error {
+ return nil
+}
+
+// MarshalBinary encodes Context value
+// This function is used by request caching
+func (c *Context) MarshalBinary() (_ []byte, _ error) {
+ return nil, nil
+}
+
+// Put stores a value of any type in Context
+func (c *Context) Put(key string, value interface{}) {
+ c.lock.Lock()
+ c.contextMap[key] = value
+ c.lock.Unlock()
+}
+
+// Get retrieves a string value from Context.
+// Get returns an empty string if key not found
+func (c *Context) Get(key string) string {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if v, ok := c.contextMap[key]; ok {
+ return v.(string)
+ }
+ return ""
+}
+
+// GetAny retrieves a value from Context.
+// GetAny returns nil if key not found
+func (c *Context) GetAny(key string) interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if v, ok := c.contextMap[key]; ok {
+ return v
+ }
+ return nil
+}
+
+// ForEach iterates over the context entries, applying fn to each key-value pair and collecting the results
+func (c *Context) ForEach(fn func(k string, v interface{}) interface{}) []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ ret := make([]interface{}, 0, len(c.contextMap))
+ for k, v := range c.contextMap {
+ ret = append(ret, fn(k, v))
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/gocolly/colly/debug/debug.go b/vendor/github.com/gocolly/colly/debug/debug.go
new file mode 100644
index 000000000..705d0f7ae
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/debug.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+// Event represents an action inside a collector
+type Event struct {
+ // Type is the type of the event
+ Type string
+ // RequestID identifies the HTTP request of the Event
+ RequestID uint32
+ // CollectorID identifies the collector of the Event
+ CollectorID uint32
+ // Values contains the event's key-value pairs. Different type of events
+ // can return different key-value pairs
+ Values map[string]string
+}
+
+// Debugger is an interface for different type of debugging backends
+type Debugger interface {
+ // Init initializes the backend
+ Init() error
+ // Event receives a new collector event.
+ Event(e *Event)
+}
diff --git a/vendor/github.com/gocolly/colly/debug/logdebugger.go b/vendor/github.com/gocolly/colly/debug/logdebugger.go
new file mode 100644
index 000000000..f866b6d8a
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/logdebugger.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+ "io"
+ "log"
+ "os"
+ "sync/atomic"
+ "time"
+)
+
+// LogDebugger is the simplest debugger which prints log messages to the STDERR
+type LogDebugger struct {
+ // Output is the log destination, anything can be used which implements them
+ // io.Writer interface. Leave it blank to use STDERR
+ Output io.Writer
+ // Prefix appears at the beginning of each generated log line
+ Prefix string
+ // Flag defines the logging properties.
+ Flag int
+ logger *log.Logger
+ counter int32
+ start time.Time
+}
+
+// Init initializes the LogDebugger
+func (l *LogDebugger) Init() error {
+ l.counter = 0
+ l.start = time.Now()
+ if l.Output == nil {
+ l.Output = os.Stderr
+ }
+ l.logger = log.New(l.Output, l.Prefix, l.Flag)
+ return nil
+}
+
+// Event receives Collector events and prints them to STDERR
+func (l *LogDebugger) Event(e *Event) {
+ i := atomic.AddInt32(&l.counter, 1)
+ l.logger.Printf("[%06d] %d [%6d - %s] %q (%s)\n", i, e.CollectorID, e.RequestID, e.Type, e.Values, time.Since(l.start))
+}
diff --git a/vendor/github.com/gocolly/colly/debug/webdebugger.go b/vendor/github.com/gocolly/colly/debug/webdebugger.go
new file mode 100644
index 000000000..e246361e1
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/webdebugger.go
@@ -0,0 +1,146 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+ "encoding/json"
+ "log"
+ "net/http"
+ "time"
+)
+
+// WebDebugger is a web-based debugging frontend for colly
+type WebDebugger struct {
+ // Address is the address of the web server. It is 127.0.0.1:7676 by default.
+ Address string
+ initialized bool
+ CurrentRequests map[uint32]requestInfo
+ RequestLog []requestInfo
+}
+
+type requestInfo struct {
+ URL string
+ Started time.Time
+ Duration time.Duration
+ ResponseStatus string
+ ID uint32
+ CollectorID uint32
+}
+
+// Init initializes the WebDebugger
+func (w *WebDebugger) Init() error {
+ if w.initialized {
+ return nil
+ }
+ defer func() {
+ w.initialized = true
+ }()
+ if w.Address == "" {
+ w.Address = "127.0.0.1:7676"
+ }
+ w.RequestLog = make([]requestInfo, 0)
+ w.CurrentRequests = make(map[uint32]requestInfo)
+ http.HandleFunc("/", w.indexHandler)
+ http.HandleFunc("/status", w.statusHandler)
+ log.Println("Starting debug webserver on", w.Address)
+ go http.ListenAndServe(w.Address, nil)
+ return nil
+}
+
+// Event updates the debugger's status
+func (w *WebDebugger) Event(e *Event) {
+ switch e.Type {
+ case "request":
+ w.CurrentRequests[e.RequestID] = requestInfo{
+ URL: e.Values["url"],
+ Started: time.Now(),
+ ID: e.RequestID,
+ CollectorID: e.CollectorID,
+ }
+ case "response", "error":
+ r := w.CurrentRequests[e.RequestID]
+ r.Duration = time.Since(r.Started)
+ r.ResponseStatus = e.Values["status"]
+ w.RequestLog = append(w.RequestLog, r)
+ delete(w.CurrentRequests, e.RequestID)
+ }
+}
+
+func (w *WebDebugger) indexHandler(wr http.ResponseWriter, r *http.Request) {
+ wr.Write([]byte(`
+
+
+ Colly Debugger WebUI
+
+
+
+
+
+
+
+
+`))
+}
+
+func (w *WebDebugger) statusHandler(wr http.ResponseWriter, r *http.Request) {
+ jsonData, err := json.MarshalIndent(w, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ wr.Write(jsonData)
+}
diff --git a/vendor/github.com/gocolly/colly/htmlelement.go b/vendor/github.com/gocolly/colly/htmlelement.go
new file mode 100644
index 000000000..92484bd2b
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/htmlelement.go
@@ -0,0 +1,120 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "strings"
+
+ "github.com/PuerkitoBio/goquery"
+ "golang.org/x/net/html"
+)
+
+// HTMLElement is the representation of a HTML tag.
+type HTMLElement struct {
+ // Name is the name of the tag
+ Name string
+ Text string
+ attributes []html.Attribute
+ // Request is the request object of the element's HTML document
+ Request *Request
+ // Response is the Response object of the element's HTML document
+ Response *Response
+ // DOM is the goquery parsed DOM object of the page. DOM is relative
+ // to the current HTMLElement
+ DOM *goquery.Selection
+ // Index stores the position of the current element within all the elements matched by an OnHTML callback
+ Index int
+}
+
+// NewHTMLElementFromSelectionNode creates a HTMLElement from a goquery.Selection Node.
+func NewHTMLElementFromSelectionNode(resp *Response, s *goquery.Selection, n *html.Node, idx int) *HTMLElement {
+ return &HTMLElement{
+ Name: n.Data,
+ Request: resp.Request,
+ Response: resp,
+ Text: goquery.NewDocumentFromNode(n).Text(),
+ DOM: s,
+ Index: idx,
+ attributes: n.Attr,
+ }
+}
+
+// Attr returns the selected attribute of a HTMLElement or empty string
+// if no attribute found
+func (h *HTMLElement) Attr(k string) string {
+ for _, a := range h.attributes {
+ if a.Key == k {
+ return a.Val
+ }
+ }
+ return ""
+}
+
+// ChildText returns the concatenated and stripped text content of the matching
+// elements.
+func (h *HTMLElement) ChildText(goquerySelector string) string {
+ return strings.TrimSpace(h.DOM.Find(goquerySelector).Text())
+}
+
+// ChildAttr returns the stripped text content of the first matching
+// element's attribute.
+func (h *HTMLElement) ChildAttr(goquerySelector, attrName string) string {
+ if attr, ok := h.DOM.Find(goquerySelector).Attr(attrName); ok {
+ return strings.TrimSpace(attr)
+ }
+ return ""
+}
+
+// ChildAttrs returns the stripped text content of all the matching
+// element's attributes.
+func (h *HTMLElement) ChildAttrs(goquerySelector, attrName string) []string {
+ var res []string
+ h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
+ if attr, ok := s.Attr(attrName); ok {
+ res = append(res, strings.TrimSpace(attr))
+ }
+ })
+ return res
+}
+
+// ForEach iterates over the elements matched by the first argument
+// and calls the callback function on every HTMLElement match.
+func (h *HTMLElement) ForEach(goquerySelector string, callback func(int, *HTMLElement)) {
+ i := 0
+ h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
+ for _, n := range s.Nodes {
+ callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i))
+ i++
+ }
+ })
+}
+
+// ForEachWithBreak iterates over the elements matched by the first argument
+// and calls the callback function on every HTMLElement match.
+// It is identical to ForEach except that it is possible to break
+// out of the loop by returning false in the callback function. It returns the
+// current Selection object.
+func (h *HTMLElement) ForEachWithBreak(goquerySelector string, callback func(int, *HTMLElement) bool) {
+ i := 0
+ h.DOM.Find(goquerySelector).EachWithBreak(func(_ int, s *goquery.Selection) bool {
+ for _, n := range s.Nodes {
+ if callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i)) {
+ i++
+ return true
+ }
+ }
+ return false
+ })
+}
diff --git a/vendor/github.com/gocolly/colly/http_backend.go b/vendor/github.com/gocolly/colly/http_backend.go
new file mode 100644
index 000000000..5c3c216d2
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/http_backend.go
@@ -0,0 +1,227 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "crypto/sha1"
+ "encoding/gob"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "sync"
+ "time"
+
+ "compress/gzip"
+
+ "github.com/gobwas/glob"
+)
+
+type httpBackend struct {
+ LimitRules []*LimitRule
+ Client *http.Client
+ lock *sync.RWMutex
+}
+
+// LimitRule provides connection restrictions for domains.
+// Both DomainRegexp and DomainGlob can be used to specify
+// the included domains patterns, but at least one is required.
+// There can be two kind of limitations:
+// - Parallelism: Set limit for the number of concurrent requests to matching domains
+// - Delay: Wait specified amount of time between requests (parallelism is 1 in this case)
+type LimitRule struct {
+ // DomainRegexp is a regular expression to match against domains
+ DomainRegexp string
+	// DomainGlob is a glob pattern to match against domains
+ DomainGlob string
+ // Delay is the duration to wait before creating a new request to the matching domains
+ Delay time.Duration
+ // RandomDelay is the extra randomized duration to wait added to Delay before creating a new request
+ RandomDelay time.Duration
+ // Parallelism is the number of the maximum allowed concurrent requests of the matching domains
+ Parallelism int
+ waitChan chan bool
+ compiledRegexp *regexp.Regexp
+ compiledGlob glob.Glob
+}
+
+// Init initializes the private members of LimitRule
+func (r *LimitRule) Init() error {
+ waitChanSize := 1
+ if r.Parallelism > 1 {
+ waitChanSize = r.Parallelism
+ }
+ r.waitChan = make(chan bool, waitChanSize)
+ hasPattern := false
+ if r.DomainRegexp != "" {
+ c, err := regexp.Compile(r.DomainRegexp)
+ if err != nil {
+ return err
+ }
+ r.compiledRegexp = c
+ hasPattern = true
+ }
+ if r.DomainGlob != "" {
+ c, err := glob.Compile(r.DomainGlob)
+ if err != nil {
+ return err
+ }
+ r.compiledGlob = c
+ hasPattern = true
+ }
+ if !hasPattern {
+ return ErrNoPattern
+ }
+ return nil
+}
+
+func (h *httpBackend) Init(jar http.CookieJar) {
+ rand.Seed(time.Now().UnixNano())
+ h.Client = &http.Client{
+ Jar: jar,
+ Timeout: 10 * time.Second,
+ }
+ h.lock = &sync.RWMutex{}
+}
+
+// Match checks that the domain parameter triggers the rule
+func (r *LimitRule) Match(domain string) bool {
+ match := false
+ if r.compiledRegexp != nil && r.compiledRegexp.MatchString(domain) {
+ match = true
+ }
+ if r.compiledGlob != nil && r.compiledGlob.Match(domain) {
+ match = true
+ }
+ return match
+}
+
+func (h *httpBackend) GetMatchingRule(domain string) *LimitRule {
+ if h.LimitRules == nil {
+ return nil
+ }
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+ for _, r := range h.LimitRules {
+ if r.Match(domain) {
+ return r
+ }
+ }
+ return nil
+}
+
+func (h *httpBackend) Cache(request *http.Request, bodySize int, cacheDir string) (*Response, error) {
+ if cacheDir == "" || request.Method != "GET" {
+ return h.Do(request, bodySize)
+ }
+ sum := sha1.Sum([]byte(request.URL.String()))
+ hash := hex.EncodeToString(sum[:])
+ dir := path.Join(cacheDir, hash[:2])
+ filename := path.Join(dir, hash)
+ if file, err := os.Open(filename); err == nil {
+ resp := new(Response)
+ err := gob.NewDecoder(file).Decode(resp)
+ file.Close()
+ if resp.StatusCode < 500 {
+ return resp, err
+ }
+ }
+ resp, err := h.Do(request, bodySize)
+ if err != nil || resp.StatusCode >= 500 {
+ return resp, err
+ }
+ if _, err := os.Stat(dir); err != nil {
+ if err := os.MkdirAll(dir, 0750); err != nil {
+ return resp, err
+ }
+ }
+ file, err := os.Create(filename + "~")
+ if err != nil {
+ return resp, err
+ }
+ if err := gob.NewEncoder(file).Encode(resp); err != nil {
+ file.Close()
+ return resp, err
+ }
+ file.Close()
+ return resp, os.Rename(filename+"~", filename)
+}
+
+func (h *httpBackend) Do(request *http.Request, bodySize int) (*Response, error) {
+ r := h.GetMatchingRule(request.URL.Host)
+ if r != nil {
+ r.waitChan <- true
+ defer func(r *LimitRule) {
+ randomDelay := time.Duration(0)
+ if r.RandomDelay != 0 {
+ randomDelay = time.Duration(rand.Int63n(int64(r.RandomDelay)))
+ }
+ time.Sleep(r.Delay + randomDelay)
+ <-r.waitChan
+ }(r)
+ }
+
+ res, err := h.Client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ if res.Request != nil {
+ *request = *res.Request
+ }
+
+ var bodyReader io.Reader = res.Body
+ if bodySize > 0 {
+ bodyReader = io.LimitReader(bodyReader, int64(bodySize))
+ }
+ if !res.Uncompressed && res.Header.Get("Content-Encoding") == "gzip" {
+ bodyReader, err = gzip.NewReader(bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ }
+ body, err := ioutil.ReadAll(bodyReader)
+ defer res.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ return &Response{
+ StatusCode: res.StatusCode,
+ Body: body,
+ Headers: &res.Header,
+ }, nil
+}
+
+func (h *httpBackend) Limit(rule *LimitRule) error {
+ h.lock.Lock()
+ if h.LimitRules == nil {
+ h.LimitRules = make([]*LimitRule, 0, 8)
+ }
+ h.LimitRules = append(h.LimitRules, rule)
+ h.lock.Unlock()
+ return rule.Init()
+}
+
+func (h *httpBackend) Limits(rules []*LimitRule) error {
+ for _, r := range rules {
+ if err := h.Limit(r); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gocolly/colly/request.go b/vendor/github.com/gocolly/colly/request.go
new file mode 100644
index 000000000..4b94cd209
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/request.go
@@ -0,0 +1,180 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync/atomic"
+)
+
+// Request is the representation of a HTTP request made by a Collector
+type Request struct {
+ // URL is the parsed URL of the HTTP request
+ URL *url.URL
+ // Headers contains the Request's HTTP headers
+ Headers *http.Header
+ // Ctx is a context between a Request and a Response
+ Ctx *Context
+ // Depth is the number of the parents of the request
+ Depth int
+ // Method is the HTTP method of the request
+ Method string
+ // Body is the request body which is used on POST/PUT requests
+ Body io.Reader
+	// ResponseCharacterEncoding is the character encoding of the response body.
+ // Leave it blank to allow automatic character encoding of the response body.
+ // It is empty by default and it can be set in OnRequest callback.
+ ResponseCharacterEncoding string
+ // ID is the Unique identifier of the request
+ ID uint32
+ collector *Collector
+ abort bool
+ baseURL *url.URL
+ // ProxyURL is the proxy address that handles the request
+ ProxyURL string
+}
+
+type serializableRequest struct {
+ URL string
+ Method string
+ Body []byte
+ ID uint32
+ Ctx map[string]interface{}
+ Headers http.Header
+}
+
+// New creates a new request with the context of the original request
+func (r *Request) New(method, URL string, body io.Reader) (*Request, error) {
+ u, err := url.Parse(URL)
+ if err != nil {
+ return nil, err
+ }
+ return &Request{
+ Method: method,
+ URL: u,
+ Body: body,
+ Ctx: r.Ctx,
+ Headers: &http.Header{},
+ ID: atomic.AddUint32(&r.collector.requestCount, 1),
+ collector: r.collector,
+ }, nil
+}
+
+// Abort cancels the HTTP request when called in an OnRequest callback
+func (r *Request) Abort() {
+ r.abort = true
+}
+
+// AbsoluteURL returns the resolved absolute URL of an URL chunk.
+// AbsoluteURL returns empty string if the URL chunk is a fragment or
+// could not be parsed
+func (r *Request) AbsoluteURL(u string) string {
+ if strings.HasPrefix(u, "#") {
+ return ""
+ }
+ var base *url.URL
+ if r.baseURL != nil {
+ base = r.baseURL
+ } else {
+ base = r.URL
+ }
+ absURL, err := base.Parse(u)
+ if err != nil {
+ return ""
+ }
+ absURL.Fragment = ""
+ if absURL.Scheme == "//" {
+ absURL.Scheme = r.URL.Scheme
+ }
+ return absURL.String()
+}
+
+// Visit continues Collector's collecting job by creating a
+// request and preserves the Context of the previous request.
+// Visit also calls the previously provided callbacks
+func (r *Request) Visit(URL string) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "GET", r.Depth+1, nil, r.Ctx, nil, true)
+}
+
+// Post continues a collector job by creating a POST request and preserves the Context
+// of the previous request.
+// Post also calls the previously provided callbacks
+func (r *Request) Post(URL string, requestData map[string]string) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createFormReader(requestData), r.Ctx, nil, true)
+}
+
+// PostRaw starts a collector job by creating a POST request with raw binary data.
+// PostRaw preserves the Context of the previous request
+// and calls the previously provided callbacks
+func (r *Request) PostRaw(URL string, requestData []byte) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, bytes.NewReader(requestData), r.Ctx, nil, true)
+}
+
+// PostMultipart starts a collector job by creating a Multipart POST request
+// with raw binary data. PostMultipart also calls the previously provided
+// callbacks.
+func (r *Request) PostMultipart(URL string, requestData map[string][]byte) error {
+ boundary := randomBoundary()
+ hdr := http.Header{}
+ hdr.Set("Content-Type", "multipart/form-data; boundary="+boundary)
+ hdr.Set("User-Agent", r.collector.UserAgent)
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createMultipartReader(boundary, requestData), r.Ctx, hdr, true)
+}
+
+// Retry submits HTTP request again with the same parameters
+func (r *Request) Retry() error {
+ return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, false)
+}
+
+// Do submits the request
+func (r *Request) Do() error {
+ return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, !r.collector.AllowURLRevisit)
+}
+
+// Marshal serializes the Request
+func (r *Request) Marshal() ([]byte, error) {
+ ctx := make(map[string]interface{})
+ if r.Ctx != nil {
+ r.Ctx.ForEach(func(k string, v interface{}) interface{} {
+ ctx[k] = v
+ return nil
+ })
+ }
+ var err error
+ var body []byte
+ if r.Body != nil {
+ body, err = ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ sr := &serializableRequest{
+ URL: r.URL.String(),
+ Method: r.Method,
+ Body: body,
+ ID: r.ID,
+ Ctx: ctx,
+ }
+ if r.Headers != nil {
+ sr.Headers = *r.Headers
+ }
+ return json.Marshal(sr)
+}
diff --git a/vendor/github.com/gocolly/colly/response.go b/vendor/github.com/gocolly/colly/response.go
new file mode 100644
index 000000000..29ba6ae14
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/response.go
@@ -0,0 +1,99 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "strings"
+
+ "github.com/saintfish/chardet"
+ "golang.org/x/net/html/charset"
+)
+
+// Response is the representation of a HTTP response made by a Collector
+type Response struct {
+ // StatusCode is the status code of the Response
+ StatusCode int
+ // Body is the content of the Response
+ Body []byte
+ // Ctx is a context between a Request and a Response
+ Ctx *Context
+ // Request is the Request object of the response
+ Request *Request
+ // Headers contains the Response's HTTP headers
+ Headers *http.Header
+}
+
+// Save writes response body to disk
+func (r *Response) Save(fileName string) error {
+ return ioutil.WriteFile(fileName, r.Body, 0644)
+}
+
+// FileName returns the sanitized file name parsed from "Content-Disposition"
+// header or from URL
+func (r *Response) FileName() string {
+ _, params, err := mime.ParseMediaType(r.Headers.Get("Content-Disposition"))
+ if fName, ok := params["filename"]; ok && err == nil {
+ return SanitizeFileName(fName)
+ }
+ if r.Request.URL.RawQuery != "" {
+ return SanitizeFileName(fmt.Sprintf("%s_%s", r.Request.URL.Path, r.Request.URL.RawQuery))
+ }
+ return SanitizeFileName(strings.TrimPrefix(r.Request.URL.Path, "/"))
+}
+
+func (r *Response) fixCharset(detectCharset bool, defaultEncoding string) error {
+ if defaultEncoding != "" {
+ tmpBody, err := encodeBytes(r.Body, "text/plain; charset="+defaultEncoding)
+ if err != nil {
+ return err
+ }
+ r.Body = tmpBody
+ return nil
+ }
+ contentType := strings.ToLower(r.Headers.Get("Content-Type"))
+ if !strings.Contains(contentType, "charset") {
+ if !detectCharset {
+ return nil
+ }
+ d := chardet.NewTextDetector()
+ r, err := d.DetectBest(r.Body)
+ if err != nil {
+ return err
+ }
+ contentType = "text/plain; charset=" + r.Charset
+ }
+ if strings.Contains(contentType, "utf-8") || strings.Contains(contentType, "utf8") {
+ return nil
+ }
+ tmpBody, err := encodeBytes(r.Body, contentType)
+ if err != nil {
+ return err
+ }
+ r.Body = tmpBody
+ return nil
+}
+
+func encodeBytes(b []byte, contentType string) ([]byte, error) {
+ r, err := charset.NewReader(bytes.NewReader(b), contentType)
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(r)
+}
diff --git a/vendor/github.com/gocolly/colly/storage/storage.go b/vendor/github.com/gocolly/colly/storage/storage.go
new file mode 100644
index 000000000..fcb0c0ce1
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/storage/storage.go
@@ -0,0 +1,128 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "net/http"
+ "net/http/cookiejar"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Storage is an interface which handles Collector's internal data,
+// like visited urls and cookies.
+// The default Storage of the Collector is the InMemoryStorage.
+// Collector's storage can be changed by calling Collector.SetStorage()
+// function.
+type Storage interface {
+	// Init initializes the storage
+	Init() error
+	// Visited receives and stores a request ID that is visited by the Collector
+	Visited(requestID uint64) error
+	// IsVisited returns true if the request was visited before IsVisited
+	// is called
+	IsVisited(requestID uint64) (bool, error)
+	// Cookies retrieves stored cookies for a given host,
+	// serialized in the newline-separated format of StringifyCookies.
+	Cookies(u *url.URL) string
+	// SetCookies stores cookies for a given host; cookies is expected in
+	// the same serialized format produced by StringifyCookies.
+	SetCookies(u *url.URL, cookies string)
+}
+
+// InMemoryStorage is the default storage backend of colly.
+// InMemoryStorage keeps cookies and visited urls in memory
+// without persisting data on the disk.
+// The zero value becomes usable after calling Init.
+type InMemoryStorage struct {
+	// visitedURLs records which request IDs have been seen.
+	visitedURLs map[uint64]bool
+	// lock guards visitedURLs for concurrent access.
+	lock *sync.RWMutex
+	// jar holds cookies per host.
+	jar *cookiejar.Jar
+}
+
+// Init initializes InMemoryStorage, lazily allocating any field that has not
+// already been set, so it is safe to call on a zero-value InMemoryStorage.
+func (s *InMemoryStorage) Init() error {
+	if s.visitedURLs == nil {
+		s.visitedURLs = make(map[uint64]bool)
+	}
+	if s.lock == nil {
+		s.lock = &sync.RWMutex{}
+	}
+	if s.jar == nil {
+		var err error
+		s.jar, err = cookiejar.New(nil)
+		return err
+	}
+	return nil
+}
+
+// Visited implements Storage.Visited(); it marks requestID as seen.
+func (s *InMemoryStorage) Visited(requestID uint64) error {
+	s.lock.Lock()
+	s.visitedURLs[requestID] = true
+	s.lock.Unlock()
+	return nil
+}
+
+// IsVisited implements Storage.IsVisited(); it reports whether requestID was
+// previously recorded via Visited.
+func (s *InMemoryStorage) IsVisited(requestID uint64) (bool, error) {
+	s.lock.RLock()
+	visited := s.visitedURLs[requestID]
+	s.lock.RUnlock()
+	return visited, nil
+}
+
+// Cookies implements Storage.Cookies(); it returns the cookies stored for u
+// in the newline-separated format of StringifyCookies.
+func (s *InMemoryStorage) Cookies(u *url.URL) string {
+	return StringifyCookies(s.jar.Cookies(u))
+}
+
+// SetCookies implements Storage.SetCookies(); cookies is expected in the
+// newline-separated format produced by StringifyCookies.
+func (s *InMemoryStorage) SetCookies(u *url.URL, cookies string) {
+	s.jar.SetCookies(u, UnstringifyCookies(cookies))
+}
+
+// Close is a no-op for the in-memory backend; there is nothing to release.
+// NOTE(review): the Storage interface above declares no Close method, so the
+// original "implements Storage.Close()" claim does not hold — confirm.
+func (s *InMemoryStorage) Close() error {
+	return nil
+}
+
+// StringifyCookies serializes list of http.Cookies to string,
+// one http.Cookie.String() per line.
+func StringifyCookies(cookies []*http.Cookie) string {
+	// Stringify cookies.
+	cs := make([]string, len(cookies))
+	for i, c := range cookies {
+		cs[i] = c.String()
+	}
+	return strings.Join(cs, "\n")
+}
+
+// UnstringifyCookies deserializes a cookie string to http.Cookies.
+// Each line is parsed as a Set-Cookie header of a synthetic http.Response,
+// so malformed lines are silently dropped by the header parser.
+func UnstringifyCookies(s string) []*http.Cookie {
+	h := http.Header{}
+	for _, c := range strings.Split(s, "\n") {
+		h.Add("Set-Cookie", c)
+	}
+	r := http.Response{Header: h}
+	return r.Cookies()
+}
+
+// ContainsCookie checks if a cookie name is represented in cookies.
+func ContainsCookie(cookies []*http.Cookie, name string) bool {
+	for _, c := range cookies {
+		if c.Name == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/gocolly/colly/unmarshal.go b/vendor/github.com/gocolly/colly/unmarshal.go
new file mode 100644
index 000000000..c4a66c4a6
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/unmarshal.go
@@ -0,0 +1,171 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+
+ "github.com/PuerkitoBio/goquery"
+)
+
+// Unmarshal is a shorthand for colly.UnmarshalHTML, applied to the element's
+// own DOM subtree.
+func (h *HTMLElement) Unmarshal(v interface{}) error {
+	return UnmarshalHTML(v, h.DOM)
+}
+
+// UnmarshalHTML declaratively extracts text or attributes to a struct from
+// HTML response using struct tags composed of css selectors.
+// Allowed struct tags:
+//  - "selector" (required): CSS (goquery) selector of the desired data
+//  - "attr" (optional): Selects the matching element's attribute's value.
+//    Leave it blank or omit to get the text of the element.
+//
+// Example struct declaration:
+//
+//   type Nested struct {
+//   	String  string   `selector:"div > p"`
+//      Classes []string `selector:"li" attr:"class"`
+//      Struct  *Nested  `selector:"div > div"`
+//   }
+//
+// Supported types: struct, *struct, string, []string
+func UnmarshalHTML(v interface{}, s *goquery.Selection) error {
+	rv := reflect.ValueOf(v)
+
+	// v must be a non-nil pointer so that fields can be written to.
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return errors.New("Invalid type or nil-pointer")
+	}
+
+	sv := rv.Elem()
+	st := reflect.TypeOf(v).Elem()
+
+	for i := 0; i < sv.NumField(); i++ {
+		attrV := sv.Field(i)
+		// Unexported (unsettable) fields are silently skipped.
+		if !attrV.CanAddr() || !attrV.CanSet() {
+			continue
+		}
+		if err := unmarshalAttr(s, attrV, st.Field(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// unmarshalAttr fills one struct field from the selection, dispatching on the
+// field's kind. The "selector" tag picks the nodes; the "attr" tag, when
+// present, selects an attribute value instead of the text content.
+func unmarshalAttr(s *goquery.Selection, attrV reflect.Value, attrT reflect.StructField) error {
+	selector := attrT.Tag.Get("selector")
+	// A selector of "-" marks the field as ignored.
+	if selector == "-" {
+		return nil
+	}
+	htmlAttr := attrT.Tag.Get("attr")
+	// TODO support more types
+	switch attrV.Kind() {
+	case reflect.Slice:
+		if err := unmarshalSlice(s, selector, htmlAttr, attrV); err != nil {
+			return err
+		}
+	case reflect.String:
+		val := getDOMValue(s.Find(selector), htmlAttr)
+		attrV.Set(reflect.Indirect(reflect.ValueOf(val)))
+	case reflect.Struct:
+		if err := unmarshalStruct(s, selector, attrV); err != nil {
+			return err
+		}
+	case reflect.Ptr:
+		if err := unmarshalPtr(s, selector, attrV); err != nil {
+			return err
+		}
+	default:
+		return errors.New("Invalid type: " + attrV.String())
+	}
+	return nil
+}
+
+// unmarshalStruct recursively unmarshals a nested struct field from the
+// subtree matched by selector (or from s itself when selector is empty).
+// A selector matching no nodes leaves the field untouched.
+func unmarshalStruct(s *goquery.Selection, selector string, attrV reflect.Value) error {
+	newS := s
+	if selector != "" {
+		newS = newS.Find(selector)
+	}
+	if newS.Nodes == nil {
+		return nil
+	}
+	v := reflect.New(attrV.Type())
+	err := UnmarshalHTML(v.Interface(), newS)
+	if err != nil {
+		return err
+	}
+	attrV.Set(reflect.Indirect(v))
+	return nil
+}
+
+// unmarshalPtr is like unmarshalStruct but for *struct fields: the struct is
+// only allocated when the selector matches, so a non-matching selector leaves
+// the pointer nil.
+func unmarshalPtr(s *goquery.Selection, selector string, attrV reflect.Value) error {
+	newS := s
+	if selector != "" {
+		newS = newS.Find(selector)
+	}
+	if newS.Nodes == nil {
+		return nil
+	}
+	// Only pointers to structs are supported.
+	e := attrV.Type().Elem()
+	if e.Kind() != reflect.Struct {
+		return errors.New("Invalid slice type")
+	}
+	v := reflect.New(e)
+	err := UnmarshalHTML(v.Interface(), newS)
+	if err != nil {
+		return err
+	}
+	attrV.Set(v)
+	return nil
+}
+
+// unmarshalSlice appends one element per node matched by selector.
+// Supported element kinds: string, struct and *struct.
+func unmarshalSlice(s *goquery.Selection, selector, htmlAttr string, attrV reflect.Value) error {
+	// Replace a nil slice with an empty one before appending.
+	if attrV.Pointer() == 0 {
+		v := reflect.MakeSlice(attrV.Type(), 0, 0)
+		attrV.Set(v)
+	}
+	switch attrV.Type().Elem().Kind() {
+	case reflect.String:
+		s.Find(selector).Each(func(_ int, s *goquery.Selection) {
+			val := getDOMValue(s, htmlAttr)
+			attrV.Set(reflect.Append(attrV, reflect.Indirect(reflect.ValueOf(val))))
+		})
+	case reflect.Ptr:
+		s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
+			someVal := reflect.New(attrV.Type().Elem().Elem())
+			// NOTE(review): errors from the nested UnmarshalHTML calls are
+			// discarded here and in the struct case below.
+			UnmarshalHTML(someVal.Interface(), innerSel)
+			attrV.Set(reflect.Append(attrV, someVal))
+		})
+	case reflect.Struct:
+		s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
+			someVal := reflect.New(attrV.Type().Elem())
+			UnmarshalHTML(someVal.Interface(), innerSel)
+			attrV.Set(reflect.Append(attrV, reflect.Indirect(someVal)))
+		})
+	default:
+		return errors.New("Invalid slice type")
+	}
+	return nil
+}
+
+// getDOMValue returns the trimmed text of the first matched element, or the
+// value of attribute attr when attr is non-empty ("" if the attribute is
+// absent).
+func getDOMValue(s *goquery.Selection, attr string) string {
+	if attr == "" {
+		return strings.TrimSpace(s.First().Text())
+	}
+	attrV, _ := s.Attr(attr)
+	return attrV
+}
diff --git a/vendor/github.com/gocolly/colly/xmlelement.go b/vendor/github.com/gocolly/colly/xmlelement.go
new file mode 100644
index 000000000..7ff5fe553
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/xmlelement.go
@@ -0,0 +1,170 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "encoding/xml"
+ "strings"
+
+ "github.com/antchfx/htmlquery"
+ "github.com/antchfx/xmlquery"
+ "golang.org/x/net/html"
+)
+
+// XMLElement is the representation of a XML tag.
+type XMLElement struct {
+	// Name is the name of the tag
+	Name string
+	// Text is the inner text of the element.
+	Text string
+	// attributes holds []html.Attribute or []xml.Attr depending on isHTML.
+	attributes interface{}
+	// Request is the request object of the element's HTML document
+	Request *Request
+	// Response is the Response object of the element's HTML document
+	Response *Response
+	// DOM is the DOM object of the page. DOM is relative
+	// to the current XMLElement and is either a html.Node or xmlquery.Node
+	// based on how the XMLElement was created.
+	DOM interface{}
+	// isHTML records which concrete types DOM and attributes hold.
+	isHTML bool
+}
+
+// NewXMLElementFromHTMLNode creates a XMLElement from a html.Node.
+// The resulting element uses htmlquery for all child lookups.
+func NewXMLElementFromHTMLNode(resp *Response, s *html.Node) *XMLElement {
+	return &XMLElement{
+		Name: s.Data,
+		Request: resp.Request,
+		Response: resp,
+		Text: htmlquery.InnerText(s),
+		DOM: s,
+		attributes: s.Attr,
+		isHTML: true,
+	}
+}
+
+// NewXMLElementFromXMLNode creates a XMLElement from a xmlquery.Node.
+// The resulting element uses xmlquery for all child lookups.
+func NewXMLElementFromXMLNode(resp *Response, s *xmlquery.Node) *XMLElement {
+	return &XMLElement{
+		Name: s.Data,
+		Request: resp.Request,
+		Response: resp,
+		Text: s.InnerText(),
+		DOM: s,
+		attributes: s.Attr,
+		isHTML: false,
+	}
+}
+
+// Attr returns the selected attribute of a HTMLElement or empty string
+// if no attribute found
+func (h *XMLElement) Attr(k string) string {
+	// attributes was stored by the constructor as either []html.Attribute
+	// (HTML documents) or []xml.Attr (XML documents); isHTML tells us which.
+	if h.isHTML {
+		for _, a := range h.attributes.([]html.Attribute) {
+			if a.Key == k {
+				return a.Val
+			}
+		}
+	} else {
+		for _, a := range h.attributes.([]xml.Attr) {
+			if a.Name.Local == k {
+				return a.Value
+			}
+		}
+	}
+	return ""
+}
+
+// ChildText returns the concatenated and stripped text content of the matching
+// elements.
+// It returns "" when xpathQuery matches no node.
+func (h *XMLElement) ChildText(xpathQuery string) string {
+	if h.isHTML {
+		child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
+		if child == nil {
+			return ""
+		}
+		return strings.TrimSpace(htmlquery.InnerText(child))
+	}
+	child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
+	if child == nil {
+		return ""
+	}
+	return strings.TrimSpace(child.InnerText())
+
+}
+
+// ChildAttr returns the stripped text content of the first matching
+// element's attribute.
+// It returns "" when no node matches or the attribute is absent.
+func (h *XMLElement) ChildAttr(xpathQuery, attrName string) string {
+	if h.isHTML {
+		child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
+		if child != nil {
+			for _, attr := range child.Attr {
+				if attr.Key == attrName {
+					return strings.TrimSpace(attr.Val)
+				}
+			}
+		}
+	} else {
+		child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
+		if child != nil {
+			for _, attr := range child.Attr {
+				if attr.Name.Local == attrName {
+					return strings.TrimSpace(attr.Value)
+				}
+			}
+		}
+	}
+
+	return ""
+}
+
+// ChildAttrs returns the stripped text content of all the matching
+// element's attributes.
+// The result may be nil when nothing matches.
+func (h *XMLElement) ChildAttrs(xpathQuery, attrName string) []string {
+	var res []string
+	if h.isHTML {
+		for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
+			for _, attr := range child.Attr {
+				if attr.Key == attrName {
+					res = append(res, strings.TrimSpace(attr.Val))
+				}
+			}
+		}
+	} else {
+		xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
+			for _, attr := range child.Attr {
+				if attr.Name.Local == attrName {
+					res = append(res, strings.TrimSpace(attr.Value))
+				}
+			}
+		})
+	}
+	return res
+}
+
+// ChildTexts returns an array of strings corresponding to child elements that match the xpath query.
+// Each item in the array is the stripped text content of the corresponding matching child element.
+// Unlike ChildAttrs, this always returns a non-nil (possibly empty) slice.
+func (h *XMLElement) ChildTexts(xpathQuery string) []string {
+	texts := make([]string, 0)
+	if h.isHTML {
+		for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
+			texts = append(texts, strings.TrimSpace(htmlquery.InnerText(child)))
+		}
+	} else {
+		xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
+			texts = append(texts, strings.TrimSpace(child.InnerText()))
+		})
+	}
+	return texts
+}
diff --git a/vendor/github.com/kennygrant/sanitize/.gitignore b/vendor/github.com/kennygrant/sanitize/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/kennygrant/sanitize/.travis.yml b/vendor/github.com/kennygrant/sanitize/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/kennygrant/sanitize/LICENSE b/vendor/github.com/kennygrant/sanitize/LICENSE
new file mode 100644
index 000000000..749ebb2ca
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 Mechanism Design. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/kennygrant/sanitize/README.md b/vendor/github.com/kennygrant/sanitize/README.md
new file mode 100644
index 000000000..4401ef702
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/README.md
@@ -0,0 +1,62 @@
+sanitize [![GoDoc](https://godoc.org/github.com/kennygrant/sanitize?status.svg)](https://godoc.org/github.com/kennygrant/sanitize) [![Go Report Card](https://goreportcard.com/badge/github.com/kennygrant/sanitize)](https://goreportcard.com/report/github.com/kennygrant/sanitize) [![CircleCI](https://circleci.com/gh/kennygrant/sanitize.svg?style=svg)](https://circleci.com/gh/kennygrant/sanitize)
+========
+
+Package sanitize provides functions to sanitize html and paths with go (golang).
+
+FUNCTIONS
+
+
+```go
+sanitize.Accents(s string) string
+```
+
+Accents replaces a set of accented characters with ascii equivalents.
+
+```go
+sanitize.BaseName(s string) string
+```
+
+BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -. Unlike Name no attempt is made to normalise text as a path.
+
+```go
+sanitize.HTML(s string) string
+```
+
+HTML strips html tags with a very simple parser, replace common entities, and escape < and > in the result. The result is intended to be used as plain text.
+
+```go
+sanitize.HTMLAllowing(s string, args...[]string) (string, error)
+```
+
+HTMLAllowing parses html and allow certain tags and attributes from the lists optionally specified by args - args[0] is a list of allowed tags, args[1] is a list of allowed attributes. If either is missing default sets are used.
+
+```go
+sanitize.Name(s string) string
+```
+
+Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+
+```go
+sanitize.Path(s string) string
+```
+
+Path makes a string safe to use as an url path.
+
+
+Changes
+-------
+
+Version 1.2
+
+Adjusted HTML function to avoid linter warning
+Added more tests from https://githubengineering.com/githubs-post-csp-journey/
+Changed name of license file
+Added badges and change log to readme
+
+Version 1.1
+Fixed typo in comments.
+Merge pull request from Povilas Balzaravicius Pawka
+ - replace br tags with newline even when they contain a space
+
+Version 1.0
+First release
\ No newline at end of file
diff --git a/vendor/github.com/kennygrant/sanitize/sanitize.go b/vendor/github.com/kennygrant/sanitize/sanitize.go
new file mode 100644
index 000000000..2932209ca
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/sanitize.go
@@ -0,0 +1,388 @@
+// Package sanitize provides functions for sanitizing text.
+package sanitize
+
+import (
+ "bytes"
+ "html"
+ "html/template"
+ "io"
+ "path"
+ "regexp"
+ "strings"
+
+ parser "golang.org/x/net/html"
+)
+
+var (
+	// ignoreTags are tags whose entire contents (including nested markup)
+	// are dropped by HTMLAllowing.
+	ignoreTags = []string{"title", "script", "style", "iframe", "frame", "frameset", "noframes", "noembed", "embed", "applet", "object", "base"}
+
+	// defaultTags are the tags HTMLAllowing keeps when no allow-list is given.
+	defaultTags = []string{"h1", "h2", "h3", "h4", "h5", "h6", "div", "span", "hr", "p", "br", "b", "i", "strong", "em", "ol", "ul", "li", "a", "img", "pre", "code", "blockquote", "article", "section"}
+
+	// defaultAttributes are the attributes kept when no allow-list is given.
+	defaultAttributes = []string{"id", "class", "src", "href", "title", "alt", "name", "rel"}
+)
+
+// HTMLAllowing sanitizes html, allowing some tags.
+// Arrays of allowed tags and allowed attributes may optionally be passed as the second and third arguments.
+// Tokens inside an ignore-tag (script, style, iframe, ...) are dropped
+// wholesale, including any nested markup.
+func HTMLAllowing(s string, args ...[]string) (string, error) {
+
+	allowedTags := defaultTags
+	if len(args) > 0 {
+		allowedTags = args[0]
+	}
+	allowedAttributes := defaultAttributes
+	if len(args) > 1 {
+		allowedAttributes = args[1]
+	}
+
+	// Parse the html
+	tokenizer := parser.NewTokenizer(strings.NewReader(s))
+
+	buffer := bytes.NewBufferString("")
+	// ignore holds the name of the ignore-tag we are currently inside, "" if none.
+	ignore := ""
+
+	for {
+		tokenType := tokenizer.Next()
+		token := tokenizer.Token()
+
+		switch tokenType {
+
+		case parser.ErrorToken:
+			// io.EOF is the normal termination; anything else is a real error.
+			err := tokenizer.Err()
+			if err == io.EOF {
+				return buffer.String(), nil
+			}
+			return "", err
+
+		case parser.StartTagToken:
+
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+				buffer.WriteString(token.String())
+			} else if includes(ignoreTags, token.Data) {
+				ignore = token.Data
+			}
+
+		case parser.SelfClosingTagToken:
+
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+				buffer.WriteString(token.String())
+			} else if token.Data == ignore {
+				ignore = ""
+			}
+
+		case parser.EndTagToken:
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				// End tags never carry attributes.
+				token.Attr = []parser.Attribute{}
+				buffer.WriteString(token.String())
+			} else if token.Data == ignore {
+				ignore = ""
+			}
+
+		case parser.TextToken:
+			// We allow text content through, unless ignoring this entire tag and its contents (including other tags)
+			if ignore == "" {
+				buffer.WriteString(token.String())
+			}
+		case parser.CommentToken:
+			// We ignore comments by default
+		case parser.DoctypeToken:
+			// We ignore doctypes by default - html5 does not require them and this is intended for sanitizing snippets of text
+		default:
+			// We ignore unknown token types by default
+
+		}
+
+	}
+
+}
+
+// HTML strips html tags, replace common entities, and escapes <>&;'" in the result.
+// Note the returned text may contain entities as it is escaped by HTMLEscapeString, and most entities are not translated.
+func HTML(s string) (output string) {
+
+	// Shortcut strings with no tags in them
+	if !strings.ContainsAny(s, "<>") {
+		output = s
+	} else {
+
+		// First remove line breaks etc as these have no meaning outside html tags (except pre)
+		// this means pre sections will lose formatting... but will result in less unintentional paras.
+		s = strings.Replace(s, "\n", "", -1)
+
+		// Then replace line breaks with newlines, to preserve that formatting
+		// NOTE(review): the tag literals in the five Replace calls below
+		// (originally </p>, <br>, </br>, <br/>, <br /> in upstream
+		// kennygrant/sanitize) appear to have been stripped by an
+		// HTML-unaware extraction step; as written these lines are not valid
+		// Go and must be restored from upstream before use.
+		s = strings.Replace(s, "
", "\n", -1)
+		s = strings.Replace(s, " ", "\n", -1)
+		s = strings.Replace(s, "", "\n", -1)
+		s = strings.Replace(s, " ", "\n", -1)
+		s = strings.Replace(s, " ", "\n", -1)
+
+		// Walk through the string removing all tags
+		b := bytes.NewBufferString("")
+		inTag := false
+		for _, r := range s {
+			switch r {
+			case '<':
+				inTag = true
+			case '>':
+				inTag = false
+			default:
+				if !inTag {
+					b.WriteRune(r)
+				}
+			}
+		}
+		output = b.String()
+	}
+
+	// Remove a few common harmless entities, to arrive at something more like plain text
+	// NOTE(review): the first Replace argument on each line below was
+	// originally an HTML entity literal (&lsquo;, &rsquo;, &ldquo;, &rdquo;,
+	// &nbsp;, &quot;, &apos; upstream) that has been decoded by the same
+	// extraction step — restore from upstream kennygrant/sanitize.
+	output = strings.Replace(output, "‘", "'", -1)
+	output = strings.Replace(output, "’", "'", -1)
+	output = strings.Replace(output, "“", "\"", -1)
+	output = strings.Replace(output, "”", "\"", -1)
+	output = strings.Replace(output, " ", " ", -1)
+	output = strings.Replace(output, """, "\"", -1)
+	output = strings.Replace(output, "'", "'", -1)
+
+	// Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
+	output = html.UnescapeString(output)
+
+	// In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
+	output = template.HTMLEscapeString(output)
+
+	// After processing, remove some harmless entities &, ' and " which are encoded by HTMLEscapeString
+	// NOTE(review): same decoding damage here — upstream uses &#34;, &#39;,
+	// "&amp; " and "&amp;amp; " as the first arguments.
+	output = strings.Replace(output, """, "\"", -1)
+	output = strings.Replace(output, "'", "'", -1)
+	output = strings.Replace(output, "& ", "& ", -1) // NB space after
+	output = strings.Replace(output, "& ", "& ", -1) // NB space after
+
+	return output
+}
+
+// We are very restrictive as this is intended for ascii url slugs
+var illegalPath = regexp.MustCompile(`[^[:alnum:]\~\-\./]`)
+
+// Path makes a string safe to use as a URL path,
+// removing accents and replacing separators with -.
+// The path may still start at / and is not intended
+// for use as a file system path without prefix.
+func Path(s string) string {
+	// Start with lowercase string
+	filePath := strings.ToLower(s)
+	// Strip ".." before cleaning to block path traversal.
+	filePath = strings.Replace(filePath, "..", "", -1)
+	filePath = path.Clean(filePath)
+
+	// Remove illegal characters for paths, flattening accents
+	// and replacing some common separators with -
+	filePath = cleanString(filePath, illegalPath)
+
+	// NB this may be of length 0, caller must check
+	return filePath
+}
+
+// Remove all other unrecognised characters apart from
+var illegalName = regexp.MustCompile(`[^[:alnum:]-.]`)
+
+// Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+func Name(s string) string {
+	// Start with lowercase string
+	fileName := strings.ToLower(s)
+	fileName = path.Clean(path.Base(fileName))
+
+	// Remove illegal characters for names, replacing some common separators with -
+	fileName = cleanString(fileName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return fileName
+}
+
+// Replace these separators with -
+var baseNameSeparators = regexp.MustCompile(`[./]`)
+
+// BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -.
+// No attempt is made to normalise a path or normalise case.
+func BaseName(s string) string {
+
+	// Replace certain joining characters with a dash
+	baseName := baseNameSeparators.ReplaceAllString(s, "-")
+
+	// Remove illegal characters for names, replacing some common separators with -
+	baseName = cleanString(baseName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return baseName
+}
+
+// A very limited list of transliterations to catch common european names translated to urls.
+// This set could be expanded with at least caps and many more characters.
+// Keys are single runes; values may expand to multiple ascii characters.
+var transliterations = map[rune]string{
+	'À': "A",
+	'Á': "A",
+	'Â': "A",
+	'Ã': "A",
+	'Ä': "A",
+	'Å': "AA",
+	'Æ': "AE",
+	'Ç': "C",
+	'È': "E",
+	'É': "E",
+	'Ê': "E",
+	'Ë': "E",
+	'Ì': "I",
+	'Í': "I",
+	'Î': "I",
+	'Ï': "I",
+	'Ð': "D",
+	'Ł': "L",
+	'Ñ': "N",
+	'Ò': "O",
+	'Ó': "O",
+	'Ô': "O",
+	'Õ': "O",
+	'Ö': "OE",
+	'Ø': "OE",
+	'Œ': "OE",
+	'Ù': "U",
+	'Ú': "U",
+	'Ü': "UE",
+	'Û': "U",
+	'Ý': "Y",
+	'Þ': "TH",
+	'ẞ': "SS",
+	'à': "a",
+	'á': "a",
+	'â': "a",
+	'ã': "a",
+	'ä': "ae",
+	'å': "aa",
+	'æ': "ae",
+	'ç': "c",
+	'è': "e",
+	'é': "e",
+	'ê': "e",
+	'ë': "e",
+	'ì': "i",
+	'í': "i",
+	'î': "i",
+	'ï': "i",
+	'ð': "d",
+	'ł': "l",
+	'ñ': "n",
+	'ń': "n",
+	'ò': "o",
+	'ó': "o",
+	'ô': "o",
+	'õ': "o",
+	'ō': "o",
+	'ö': "oe",
+	'ø': "oe",
+	'œ': "oe",
+	'ś': "s",
+	'ù': "u",
+	'ú': "u",
+	'û': "u",
+	'ū': "u",
+	'ü': "ue",
+	'ý': "y",
+	'ÿ': "y",
+	'ż': "z",
+	'þ': "th",
+	'ß': "ss",
+}
+
+// Accents replaces a set of accented characters with ascii equivalents.
+// Runes without a transliteration entry are passed through unchanged.
+func Accents(s string) string {
+	// Replace some common accent characters
+	b := bytes.NewBufferString("")
+	for _, c := range s {
+		// Check transliterations first
+		if val, ok := transliterations[c]; ok {
+			b.WriteString(val)
+		} else {
+			b.WriteRune(c)
+		}
+	}
+	return b.String()
+}
+
+var (
+	// If the attribute contains data: or javascript: anywhere, ignore it
+	// we don't allow this in attributes as it is so frequently used for xss
+	// NB we allow spaces in the value, and lowercase.
+	// The pattern tolerates whitespace between the scheme's letters to catch
+	// obfuscated payloads like "java script:".
+	illegalAttr = regexp.MustCompile(`(d\s*a\s*t\s*a|j\s*a\s*v\s*a\s*s\s*c\s*r\s*i\s*p\s*t\s*)\s*:`)
+
+	// We are far more restrictive with href attributes.
+	legalHrefAttr = regexp.MustCompile(`\A[/#][^/\\]?|mailto:|http://|https://`)
+)
+
+// cleanAttributes returns an array of attributes after removing malicious ones.
+// Attributes not in the allowed list are dropped; allowed ones are emptied
+// (and thereby dropped) when their value matches illegalAttr, or, for href,
+// fails the legalHrefAttr allow-list.
+func cleanAttributes(a []parser.Attribute, allowed []string) []parser.Attribute {
+	if len(a) == 0 {
+		return a
+	}
+
+	var cleaned []parser.Attribute
+	for _, attr := range a {
+		if includes(allowed, attr.Key) {
+
+			// Values are compared lowercased; the original case is kept.
+			val := strings.ToLower(attr.Val)
+
+			// Check for illegal attribute values
+			if illegalAttr.FindString(val) != "" {
+				attr.Val = ""
+			}
+
+			// Check for legal href values - / mailto:// http:// or https://
+			if attr.Key == "href" {
+				if legalHrefAttr.FindString(val) == "" {
+					attr.Val = ""
+				}
+			}
+
+			// If we still have an attribute, append it to the array
+			if attr.Val != "" {
+				cleaned = append(cleaned, attr)
+			}
+		}
+	}
+	return cleaned
+}
+
+// A list of characters we consider separators in normal strings and replace with our canonical separator - rather than removing.
+var (
+	separators = regexp.MustCompile(`[ &_=+:]`)
+
+	// dashes collapses runs of '-' left behind by the replacements above.
+	dashes = regexp.MustCompile(`[\-]+`)
+)
+
+// cleanString replaces separators with - and removes characters listed in the regexp provided from string.
+// Accents, spaces, and all characters not in A-Za-z0-9 are replaced.
+func cleanString(s string, r *regexp.Regexp) string {
+
+	// Remove any trailing space to avoid ending on -
+	s = strings.Trim(s, " ")
+
+	// Flatten accents first so that if we remove non-ascii we still get a legible name
+	s = Accents(s)
+
+	// Replace certain joining characters with a dash
+	s = separators.ReplaceAllString(s, "-")
+
+	// Remove all other unrecognised characters - NB we do allow any printable characters
+	s = r.ReplaceAllString(s, "")
+
+	// Remove any multiple dashes caused by replacements above
+	s = dashes.ReplaceAllString(s, "-")
+
+	return s
+}
+
+// includes checks for inclusion of a string in a []string.
+func includes(a []string, s string) bool {
+	for _, as := range a {
+		if as == s {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/saintfish/chardet/2022.go b/vendor/github.com/saintfish/chardet/2022.go
new file mode 100644
index 000000000..e667225e5
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/2022.go
@@ -0,0 +1,102 @@
+package chardet
+
+import (
+ "bytes"
+)
+
+// recognizer2022 detects ISO-2022 charset variants (JP/KR/CN) by matching the
+// escape sequences that are legal for each variant.
+type recognizer2022 struct {
+	// charset is the name reported on a match (e.g. "ISO-2022-JP").
+	charset string
+	// escapes lists the byte sequences that may follow an ESC (0x1B) byte.
+	escapes [][]byte
+}
+
+// Match scores input against this recognizer's charset.
+func (r *recognizer2022) Match(input *recognizerInput) (output recognizerOutput) {
+	return recognizerOutput{
+		Charset: r.charset,
+		Confidence: r.matchConfidence(input.input),
+	}
+}
+
+// matchConfidence returns a confidence score in [0, 100]: every recognized
+// escape sequence after an ESC byte (0x1B) counts as a hit, every
+// unrecognized one as a miss, and SO/SI shift bytes (0x0E/0x0F) add weak
+// supporting evidence.
+func (r *recognizer2022) matchConfidence(input []byte) int {
+	var hits, misses, shifts int
+input:
+	for i := 0; i < len(input); i++ {
+		c := input[i]
+		if c == 0x1B {
+			for _, esc := range r.escapes {
+				if bytes.HasPrefix(input[i+1:], esc) {
+					hits++
+					// Skip over the matched escape sequence.
+					i += len(esc)
+					continue input
+				}
+			}
+			misses++
+		} else if c == 0x0E || c == 0x0F {
+			shifts++
+		}
+	}
+	if hits == 0 {
+		return 0
+	}
+	// Base quality is the hit ratio; penalize inputs with very little
+	// evidence (fewer than 5 hits+shifts), clamping at 0.
+	quality := (100*hits - 100*misses) / (hits + misses)
+	if hits+shifts < 5 {
+		quality -= (5 - (hits + shifts)) * 10
+	}
+	if quality < 0 {
+		quality = 0
+	}
+	return quality
+}
+
+// escapeSequences_2022JP lists the post-ESC byte sequences accepted for
+// ISO-2022-JP (designation comments carried over from ICU's detector).
+var escapeSequences_2022JP = [][]byte{
+	{0x24, 0x28, 0x43}, // KS X 1001:1992
+	{0x24, 0x28, 0x44}, // JIS X 212-1990
+	{0x24, 0x40},       // JIS C 6226-1978
+	{0x24, 0x41},       // GB 2312-80
+	{0x24, 0x42},       // JIS X 208-1983
+	{0x26, 0x40},       // JIS X 208 1990, 1997
+	{0x28, 0x42},       // ASCII
+	{0x28, 0x48},       // JIS-Roman
+	{0x28, 0x49},       // Half-width katakana
+	{0x28, 0x4a},       // JIS-Roman
+	{0x2e, 0x41},       // ISO 8859-1
+	{0x2e, 0x46},       // ISO 8859-7
+}
+
+// escapeSequences_2022KR lists the single designation used by ISO-2022-KR.
+var escapeSequences_2022KR = [][]byte{
+	{0x24, 0x29, 0x43},
+}
+
+// escapeSequences_2022CN lists the post-ESC byte sequences accepted for
+// ISO-2022-CN.
+var escapeSequences_2022CN = [][]byte{
+	{0x24, 0x29, 0x41}, // GB 2312-80
+	{0x24, 0x29, 0x47}, // CNS 11643-1992 Plane 1
+	{0x24, 0x2A, 0x48}, // CNS 11643-1992 Plane 2
+	{0x24, 0x29, 0x45}, // ISO-IR-165
+	{0x24, 0x2B, 0x49}, // CNS 11643-1992 Plane 3
+	{0x24, 0x2B, 0x4A}, // CNS 11643-1992 Plane 4
+	{0x24, 0x2B, 0x4B}, // CNS 11643-1992 Plane 5
+	{0x24, 0x2B, 0x4C}, // CNS 11643-1992 Plane 6
+	{0x24, 0x2B, 0x4D}, // CNS 11643-1992 Plane 7
+	{0x4e},             // SS2
+	{0x4f},             // SS3
+}
+
+// newRecognizer_2022JP returns a recognizer for ISO-2022-JP.
+func newRecognizer_2022JP() *recognizer2022 {
+	return &recognizer2022{
+		"ISO-2022-JP",
+		escapeSequences_2022JP,
+	}
+}
+
+// newRecognizer_2022KR returns a recognizer for ISO-2022-KR.
+func newRecognizer_2022KR() *recognizer2022 {
+	return &recognizer2022{
+		"ISO-2022-KR",
+		escapeSequences_2022KR,
+	}
+}
+
+// newRecognizer_2022CN returns a recognizer for ISO-2022-CN.
+func newRecognizer_2022CN() *recognizer2022 {
+	return &recognizer2022{
+		"ISO-2022-CN",
+		escapeSequences_2022CN,
+	}
+}
diff --git a/vendor/github.com/saintfish/chardet/AUTHORS b/vendor/github.com/saintfish/chardet/AUTHORS
new file mode 100644
index 000000000..842d0216d
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/AUTHORS
@@ -0,0 +1 @@
+Sheng Yu (yusheng dot sjtu at gmail dot com)
diff --git a/vendor/github.com/saintfish/chardet/LICENSE b/vendor/github.com/saintfish/chardet/LICENSE
new file mode 100644
index 000000000..35ee796b9
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012 chardet Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Partial of the Software is derived from ICU project. See icu-license.html for
+license of the derivative portions.
diff --git a/vendor/github.com/saintfish/chardet/README.md b/vendor/github.com/saintfish/chardet/README.md
new file mode 100644
index 000000000..4281eecc7
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/README.md
@@ -0,0 +1,10 @@
+# chardet
+
+chardet is a library to automatically detect
+[charset](http://en.wikipedia.org/wiki/Character_encoding) of texts for [Go
+programming language](http://golang.org/). It's based on the algorithm and data
+in [ICU](http://icu-project.org/)'s implementation.
+
+## Documentation and Usage
+
+See [pkgdoc](http://go.pkgdoc.org/github.com/saintfish/chardet)
diff --git a/vendor/github.com/saintfish/chardet/detector.go b/vendor/github.com/saintfish/chardet/detector.go
new file mode 100644
index 000000000..e11c222e4
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/detector.go
@@ -0,0 +1,136 @@
+// Package chardet ports character set detection from ICU.
+package chardet
+
+import (
+ "errors"
+ "sort"
+)
+
// Result contains all the information that charset detector gives.
type Result struct {
	// IANA name of the detected charset.
	Charset string
	// IANA name of the detected language. It may be empty for some charsets
	// (e.g. Unicode encodings carry no language model).
	Language string
	// Confidence of the Result. Scale from 1 to 100. The bigger, the more confident.
	Confidence int
}
+
// Detector implements charset detection.
type Detector struct {
	recognizers []recognizer // candidate recognizers; all are consulted concurrently by DetectAll
	stripTag    bool         // when true, markup between '<' and '>' is stripped before analysis
}
+
// List of charset recognizers shared by every Detector, grouped as:
// Unicode encodings, single-byte 8859-x/windows/KOI8 language models,
// multi-byte CJK charsets, ISO-2022 escape-based charsets, and IBM
// EBCDIC (424/420) variants.
var recognizers = []recognizer{
	newRecognizer_utf8(),
	newRecognizer_utf16be(),
	newRecognizer_utf16le(),
	newRecognizer_utf32be(),
	newRecognizer_utf32le(),
	newRecognizer_8859_1_en(),
	newRecognizer_8859_1_da(),
	newRecognizer_8859_1_de(),
	newRecognizer_8859_1_es(),
	newRecognizer_8859_1_fr(),
	newRecognizer_8859_1_it(),
	newRecognizer_8859_1_nl(),
	newRecognizer_8859_1_no(),
	newRecognizer_8859_1_pt(),
	newRecognizer_8859_1_sv(),
	newRecognizer_8859_2_cs(),
	newRecognizer_8859_2_hu(),
	newRecognizer_8859_2_pl(),
	newRecognizer_8859_2_ro(),
	newRecognizer_8859_5_ru(),
	newRecognizer_8859_6_ar(),
	newRecognizer_8859_7_el(),
	newRecognizer_8859_8_I_he(),
	newRecognizer_8859_8_he(),
	newRecognizer_windows_1251(),
	newRecognizer_windows_1256(),
	newRecognizer_KOI8_R(),
	newRecognizer_8859_9_tr(),

	newRecognizer_sjis(),
	newRecognizer_gb_18030(),
	newRecognizer_euc_jp(),
	newRecognizer_euc_kr(),
	newRecognizer_big5(),

	newRecognizer_2022JP(),
	newRecognizer_2022KR(),
	newRecognizer_2022CN(),

	newRecognizer_IBM424_he_rtl(),
	newRecognizer_IBM424_he_ltr(),
	newRecognizer_IBM420_ar_rtl(),
	newRecognizer_IBM420_ar_ltr(),
}
+
+// NewTextDetector creates a Detector for plain text.
+func NewTextDetector() *Detector {
+ return &Detector{recognizers, false}
+}
+
+// NewHtmlDetector creates a Detector for Html.
+func NewHtmlDetector() *Detector {
+ return &Detector{recognizers, true}
+}
+
var (
	// NotDetectedError is returned by DetectBest and DetectAll when no
	// recognizer reports a non-zero confidence for the input.
	// NOTE(review): name and message predate Go conventions (ErrXxx,
	// lowercase, no period); kept as-is since both are part of the
	// public API and callers may compare the text.
	NotDetectedError = errors.New("Charset not detected.")
)
+
+// DetectBest returns the Result with highest Confidence.
+func (d *Detector) DetectBest(b []byte) (r *Result, err error) {
+ var all []Result
+ if all, err = d.DetectAll(b); err == nil {
+ r = &all[0]
+ }
+ return
+}
+
// DetectAll returns all Results which have non-zero Confidence. The Results are sorted by Confidence in descending order.
func (d *Detector) DetectAll(b []byte) ([]Result, error) {
	input := newRecognizerInput(b, d.stripTag)
	// Fan out: every recognizer scores the input in its own goroutine and
	// reports on the unbuffered channel.
	outputChan := make(chan recognizerOutput)
	for _, r := range d.recognizers {
		go matchHelper(r, input, outputChan)
	}
	// Collect exactly one message per recognizer, keeping only non-zero
	// confidences; this also guarantees no goroutine is left blocked.
	outputs := make([]recognizerOutput, 0, len(d.recognizers))
	for i := 0; i < len(d.recognizers); i++ {
		o := <-outputChan
		if o.Confidence > 0 {
			outputs = append(outputs, o)
		}
	}
	if len(outputs) == 0 {
		return nil, NotDetectedError
	}

	// Sort by descending confidence, then keep only the first (i.e. the
	// best-scoring) entry for each charset name.
	sort.Sort(recognizerOutputs(outputs))
	dedupOutputs := make([]Result, 0, len(outputs))
	foundCharsets := make(map[string]struct{}, len(outputs))
	for _, o := range outputs {
		if _, found := foundCharsets[o.Charset]; !found {
			dedupOutputs = append(dedupOutputs, Result(o))
			foundCharsets[o.Charset] = struct{}{}
		}
	}
	// Defensive: cannot actually trigger, since outputs is non-empty and
	// its first element is always appended above.
	if len(dedupOutputs) == 0 {
		return nil, NotDetectedError
	}
	return dedupOutputs, nil
}
+
// matchHelper runs a single recognizer and sends its result on
// outputChan; used as the goroutine body in DetectAll.
func matchHelper(r recognizer, input *recognizerInput, outputChan chan<- recognizerOutput) {
	outputChan <- r.Match(input)
}
+
// recognizerOutputs implements sort.Interface, ordering results by
// descending Confidence.
type recognizerOutputs []recognizerOutput

func (r recognizerOutputs) Len() int           { return len(r) }
func (r recognizerOutputs) Less(i, j int) bool { return r[i].Confidence > r[j].Confidence }
func (r recognizerOutputs) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
diff --git a/vendor/github.com/saintfish/chardet/icu-license.html b/vendor/github.com/saintfish/chardet/icu-license.html
new file mode 100644
index 000000000..d078d0575
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/icu-license.html
@@ -0,0 +1,51 @@
+
+
+
+
+ICU License - ICU 1.8.1 and later
+
+
+
+
ICU License - ICU 1.8.1 and later
+
+
COPYRIGHT AND PERMISSION NOTICE
+
+
+Copyright (c) 1995-2012 International Business Machines Corporation and others
+
+
+All rights reserved.
+
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, and/or sell
+copies of the Software, and to permit persons
+to whom the Software is furnished to do so, provided that the above
+copyright notice(s) and this permission notice appear in all copies
+of the Software and that both the above copyright notice(s) and this
+permission notice appear in supporting documentation.
+
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM,
+OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
+USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+Except as contained in this notice, the name of a copyright holder shall not be
+used in advertising or otherwise to promote the sale, use or other dealings in
+this Software without prior written authorization of the copyright holder.
+
+
+
+
+All trademarks and registered trademarks mentioned herein are the property of their respective owners.
+
+
+
diff --git a/vendor/github.com/saintfish/chardet/multi_byte.go b/vendor/github.com/saintfish/chardet/multi_byte.go
new file mode 100644
index 000000000..1fab34ce8
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/multi_byte.go
@@ -0,0 +1,345 @@
+package chardet
+
+import (
+ "errors"
+ "math"
+)
+
// recognizerMultiByte scores input against one multi-byte CJK charset by
// decoding it character-by-character and checking decode errors plus a
// table of frequently used character codes.
type recognizerMultiByte struct {
	charset     string      // IANA charset name reported on match
	language    string      // IANA language code reported on match
	decoder     charDecoder // charset-specific character decoder
	commonChars []uint16    // sorted table of common char codes; may be nil
}

// charDecoder consumes one character from a byte slice, returning its
// code value, the unread remainder, and an error for malformed input.
type charDecoder interface {
	DecodeOneChar([]byte) (c uint16, remain []byte, err error)
}
+
+func (r *recognizerMultiByte) Match(input *recognizerInput) (output recognizerOutput) {
+ return recognizerOutput{
+ Charset: r.charset,
+ Language: r.language,
+ Confidence: r.matchConfidence(input),
+ }
+}
+
// matchConfidence decodes the raw input one character at a time and
// derives a 0-100 confidence from the mix of decode errors, double-byte
// characters, and characters found in the common-character table.
func (r *recognizerMultiByte) matchConfidence(input *recognizerInput) int {
	raw := input.raw
	var c uint16
	var err error
	var totalCharCount, badCharCount, singleByteCharCount, doubleByteCharCount, commonCharCount int
	for c, raw, err = r.decoder.DecodeOneChar(raw); len(raw) > 0; c, raw, err = r.decoder.DecodeOneChar(raw) {
		totalCharCount++
		if err != nil {
			badCharCount++
		} else if c <= 0xFF {
			singleByteCharCount++
		} else {
			doubleByteCharCount++
			if r.commonChars != nil && binarySearch(r.commonChars, c) {
				commonCharCount++
			}
		}
		// Bail out early once decode errors clearly dominate.
		if badCharCount >= 2 && badCharCount*5 >= doubleByteCharCount {
			return 0
		}
	}

	// Short, error-free input: too little evidence for a strong score.
	if doubleByteCharCount <= 10 && badCharCount == 0 {
		if doubleByteCharCount == 0 && totalCharCount < 10 {
			return 0
		} else {
			return 10
		}
	}

	if doubleByteCharCount < 20*badCharCount {
		return 0
	}
	if r.commonChars == nil {
		// No frequency table: scale linearly with clean double-byte count,
		// penalized by errors, capped at 100.
		confidence := 30 + doubleByteCharCount - 20*badCharCount
		if confidence > 100 {
			confidence = 100
		}
		return confidence
	}
	// With a frequency table: score common-character hits on a log scale,
	// normalized so a fully-common input approaches 100.
	maxVal := math.Log(float64(doubleByteCharCount) / 4)
	scaleFactor := 90 / maxVal
	confidence := int(math.Log(float64(commonCharCount)+1)*scaleFactor + 10)
	if confidence > 100 {
		confidence = 100
	}
	if confidence < 0 {
		confidence = 0
	}
	return confidence
}
+
// binarySearch reports whether c occurs in the ascending-sorted slice l.
func binarySearch(l []uint16, c uint16) bool {
	lo, hi := 0, len(l)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case l[mid] == c:
			return true
		case l[mid] > c:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	return false
}
+
// Sentinel errors shared by all multi-byte decoders.
var eobError = errors.New("End of input buffer")
var badCharError = errors.New("Decode a bad char")

// charDecoder_sjis decodes one Shift_JIS character at a time.
type charDecoder_sjis struct {
}

// DecodeOneChar consumes one Shift_JIS character from input, returning
// its code value, the unread remainder, and an error for malformed
// sequences.
func (charDecoder_sjis) DecodeOneChar(input []byte) (c uint16, remain []byte, err error) {
	if len(input) == 0 {
		return 0, nil, eobError
	}
	lead := input[0]
	c, remain = uint16(lead), input[1:]
	// Single-byte forms: ASCII and half-width katakana stand alone.
	if lead <= 0x7F || (lead > 0xA0 && lead <= 0xDF) {
		return
	}
	// Double-byte form needs a trail byte.
	if len(remain) == 0 {
		return c, remain, badCharError
	}
	trail := remain[0]
	remain = remain[1:]
	c = c<<8 | uint16(trail)
	// Accepted trail range is the contiguous 0x40-0xFE; anything outside
	// is a decode error (same set as the original two-range test).
	if trail < 0x40 || trail == 0xFF {
		err = badCharError
	}
	return
}
+
// commonChars_sjis lists frequently occurring Shift_JIS double-byte
// character codes, sorted ascending so binarySearch can probe them.
var commonChars_sjis = []uint16{
	0x8140, 0x8141, 0x8142, 0x8145, 0x815b, 0x8169, 0x816a, 0x8175, 0x8176, 0x82a0,
	0x82a2, 0x82a4, 0x82a9, 0x82aa, 0x82ab, 0x82ad, 0x82af, 0x82b1, 0x82b3, 0x82b5,
	0x82b7, 0x82bd, 0x82be, 0x82c1, 0x82c4, 0x82c5, 0x82c6, 0x82c8, 0x82c9, 0x82cc,
	0x82cd, 0x82dc, 0x82e0, 0x82e7, 0x82e8, 0x82e9, 0x82ea, 0x82f0, 0x82f1, 0x8341,
	0x8343, 0x834e, 0x834f, 0x8358, 0x835e, 0x8362, 0x8367, 0x8375, 0x8376, 0x8389,
	0x838a, 0x838b, 0x838d, 0x8393, 0x8e96, 0x93fa, 0x95aa,
}
+
+func newRecognizer_sjis() *recognizerMultiByte {
+ return &recognizerMultiByte{
+ "Shift_JIS",
+ "ja",
+ charDecoder_sjis{},
+ commonChars_sjis,
+ }
+}
+
// charDecoder_euc decodes one EUC-encoded character at a time. The same
// decoder is shared by the EUC-JP and EUC-KR recognizers; only their
// common-character tables differ.
type charDecoder_euc struct {
}

// DecodeOneChar consumes one EUC character from input, returning its
// code value, the unread remainder, and an error for malformed input.
func (charDecoder_euc) DecodeOneChar(input []byte) (c uint16, remain []byte, err error) {
	if len(input) == 0 {
		return 0, nil, eobError
	}
	first := input[0]
	remain = input[1:]
	c = uint16(first)
	// Low lead bytes stand alone as single-byte characters.
	if first <= 0x8D {
		return uint16(first), remain, nil
	}
	if len(remain) == 0 {
		return 0, nil, eobError
	}
	second := remain[0]
	remain = remain[1:]
	c = c<<8 | uint16(second)
	// Two-byte character: lead 0xA1-0xFE requires trail >= 0xA1.
	if first >= 0xA1 && first <= 0xFE {
		if second < 0xA1 {
			err = badCharError
		}
		return
	}
	// 0x8E lead (SS2 in EUC): trail must also be >= 0xA1.
	if first == 0x8E {
		if second < 0xA1 {
			err = badCharError
		}
		return
	}
	// 0x8F lead (SS3 in EUC): a three-byte sequence follows.
	if first == 0x8F {
		if len(remain) == 0 {
			return 0, nil, eobError
		}
		third := remain[0]
		remain = remain[1:]
		// NOTE(review): `c<<0` is a no-op, so the third byte is OR-ed over
		// the trail byte already held in c; `c<<8` was probably intended
		// but would overflow uint16 anyway. Harmless for detection — the
		// commonChars tables contain only two-byte values — but confirm
		// before changing.
		c = c<<0 | uint16(third)
		if third < 0xa1 {
			err = badCharError
		}
	}
	return
}
+
// commonChars_euc_jp lists frequently occurring EUC-JP double-byte
// character codes, sorted ascending for binarySearch.
var commonChars_euc_jp = []uint16{
	0xa1a1, 0xa1a2, 0xa1a3, 0xa1a6, 0xa1bc, 0xa1ca, 0xa1cb, 0xa1d6, 0xa1d7, 0xa4a2,
	0xa4a4, 0xa4a6, 0xa4a8, 0xa4aa, 0xa4ab, 0xa4ac, 0xa4ad, 0xa4af, 0xa4b1, 0xa4b3,
	0xa4b5, 0xa4b7, 0xa4b9, 0xa4bb, 0xa4bd, 0xa4bf, 0xa4c0, 0xa4c1, 0xa4c3, 0xa4c4,
	0xa4c6, 0xa4c7, 0xa4c8, 0xa4c9, 0xa4ca, 0xa4cb, 0xa4ce, 0xa4cf, 0xa4d0, 0xa4de,
	0xa4df, 0xa4e1, 0xa4e2, 0xa4e4, 0xa4e8, 0xa4e9, 0xa4ea, 0xa4eb, 0xa4ec, 0xa4ef,
	0xa4f2, 0xa4f3, 0xa5a2, 0xa5a3, 0xa5a4, 0xa5a6, 0xa5a7, 0xa5aa, 0xa5ad, 0xa5af,
	0xa5b0, 0xa5b3, 0xa5b5, 0xa5b7, 0xa5b8, 0xa5b9, 0xa5bf, 0xa5c3, 0xa5c6, 0xa5c7,
	0xa5c8, 0xa5c9, 0xa5cb, 0xa5d0, 0xa5d5, 0xa5d6, 0xa5d7, 0xa5de, 0xa5e0, 0xa5e1,
	0xa5e5, 0xa5e9, 0xa5ea, 0xa5eb, 0xa5ec, 0xa5ed, 0xa5f3, 0xb8a9, 0xb9d4, 0xbaee,
	0xbbc8, 0xbef0, 0xbfb7, 0xc4ea, 0xc6fc, 0xc7bd, 0xcab8, 0xcaf3, 0xcbdc, 0xcdd1,
}
+
// commonChars_euc_kr lists frequently occurring EUC-KR double-byte
// character codes, sorted ascending for binarySearch.
var commonChars_euc_kr = []uint16{
	0xb0a1, 0xb0b3, 0xb0c5, 0xb0cd, 0xb0d4, 0xb0e6, 0xb0ed, 0xb0f8, 0xb0fa, 0xb0fc,
	0xb1b8, 0xb1b9, 0xb1c7, 0xb1d7, 0xb1e2, 0xb3aa, 0xb3bb, 0xb4c2, 0xb4cf, 0xb4d9,
	0xb4eb, 0xb5a5, 0xb5b5, 0xb5bf, 0xb5c7, 0xb5e9, 0xb6f3, 0xb7af, 0xb7c2, 0xb7ce,
	0xb8a6, 0xb8ae, 0xb8b6, 0xb8b8, 0xb8bb, 0xb8e9, 0xb9ab, 0xb9ae, 0xb9cc, 0xb9ce,
	0xb9fd, 0xbab8, 0xbace, 0xbad0, 0xbaf1, 0xbbe7, 0xbbf3, 0xbbfd, 0xbcad, 0xbcba,
	0xbcd2, 0xbcf6, 0xbdba, 0xbdc0, 0xbdc3, 0xbdc5, 0xbec6, 0xbec8, 0xbedf, 0xbeee,
	0xbef8, 0xbefa, 0xbfa1, 0xbfa9, 0xbfc0, 0xbfe4, 0xbfeb, 0xbfec, 0xbff8, 0xc0a7,
	0xc0af, 0xc0b8, 0xc0ba, 0xc0bb, 0xc0bd, 0xc0c7, 0xc0cc, 0xc0ce, 0xc0cf, 0xc0d6,
	0xc0da, 0xc0e5, 0xc0fb, 0xc0fc, 0xc1a4, 0xc1a6, 0xc1b6, 0xc1d6, 0xc1df, 0xc1f6,
	0xc1f8, 0xc4a1, 0xc5cd, 0xc6ae, 0xc7cf, 0xc7d1, 0xc7d2, 0xc7d8, 0xc7e5, 0xc8ad,
}
+
+func newRecognizer_euc_jp() *recognizerMultiByte {
+ return &recognizerMultiByte{
+ "EUC-JP",
+ "ja",
+ charDecoder_euc{},
+ commonChars_euc_jp,
+ }
+}
+
+func newRecognizer_euc_kr() *recognizerMultiByte {
+ return &recognizerMultiByte{
+ "EUC-KR",
+ "ko",
+ charDecoder_euc{},
+ commonChars_euc_kr,
+ }
+}
+
+type charDecoder_big5 struct {
+}
+
+func (charDecoder_big5) DecodeOneChar(input []byte) (c uint16, remain []byte, err error) {
+ if len(input) == 0 {
+ return 0, nil, eobError
+ }
+ first := input[0]
+ remain = input[1:]
+ c = uint16(first)
+ if first <= 0x7F || first == 0xFF {
+ return
+ }
+ if len(remain) == 0 {
+ return c, nil, eobError
+ }
+ second := remain[0]
+ remain = remain[1:]
+ c = c<<8 | uint16(second)
+ if second < 0x40 || second == 0x7F || second == 0xFF {
+ err = badCharError
+ }
+ return
+}
+
// commonChars_big5 lists frequently occurring Big5 double-byte character
// codes, sorted ascending for binarySearch.
var commonChars_big5 = []uint16{
	0xa140, 0xa141, 0xa142, 0xa143, 0xa147, 0xa149, 0xa175, 0xa176, 0xa440, 0xa446,
	0xa447, 0xa448, 0xa451, 0xa454, 0xa457, 0xa464, 0xa46a, 0xa46c, 0xa477, 0xa4a3,
	0xa4a4, 0xa4a7, 0xa4c1, 0xa4ce, 0xa4d1, 0xa4df, 0xa4e8, 0xa4fd, 0xa540, 0xa548,
	0xa558, 0xa569, 0xa5cd, 0xa5e7, 0xa657, 0xa661, 0xa662, 0xa668, 0xa670, 0xa6a8,
	0xa6b3, 0xa6b9, 0xa6d3, 0xa6db, 0xa6e6, 0xa6f2, 0xa740, 0xa751, 0xa759, 0xa7da,
	0xa8a3, 0xa8a5, 0xa8ad, 0xa8d1, 0xa8d3, 0xa8e4, 0xa8fc, 0xa9c0, 0xa9d2, 0xa9f3,
	0xaa6b, 0xaaba, 0xaabe, 0xaacc, 0xaafc, 0xac47, 0xac4f, 0xacb0, 0xacd2, 0xad59,
	0xaec9, 0xafe0, 0xb0ea, 0xb16f, 0xb2b3, 0xb2c4, 0xb36f, 0xb44c, 0xb44e, 0xb54c,
	0xb5a5, 0xb5bd, 0xb5d0, 0xb5d8, 0xb671, 0xb7ed, 0xb867, 0xb944, 0xbad8, 0xbb44,
	0xbba1, 0xbdd1, 0xc2c4, 0xc3b9, 0xc440, 0xc45f,
}
+
+func newRecognizer_big5() *recognizerMultiByte {
+ return &recognizerMultiByte{
+ "Big5",
+ "zh",
+ charDecoder_big5{},
+ commonChars_big5,
+ }
+}
+
// charDecoder_gb_18030 decodes one GB-18030 character at a time,
// handling one-, two-, and four-byte forms.
type charDecoder_gb_18030 struct {
}

// DecodeOneChar consumes one GB-18030 character from input, returning
// its code value, the unread remainder, and an error for malformed
// input.
func (charDecoder_gb_18030) DecodeOneChar(input []byte) (c uint16, remain []byte, err error) {
	if len(input) == 0 {
		return 0, nil, eobError
	}
	first := input[0]
	remain = input[1:]
	c = uint16(first)
	// Bytes up to 0x80 stand alone.
	if first <= 0x80 {
		return
	}
	if len(remain) == 0 {
		return 0, nil, eobError
	}
	second := remain[0]
	remain = remain[1:]
	c = c<<8 | uint16(second)
	if first >= 0x81 && first <= 0xFE {
		// Two-byte form: trail in 0x40-0x7E or 0x80-0xFE.
		if (second >= 0x40 && second <= 0x7E) || (second >= 0x80 && second <= 0xFE) {
			return
		}

		// Four-byte form: a digit second byte introduces two more bytes.
		if second >= 0x30 && second <= 0x39 {
			if len(remain) == 0 {
				return 0, nil, eobError
			}
			third := remain[0]
			remain = remain[1:]
			if third >= 0x81 && third <= 0xFE {
				if len(remain) == 0 {
					return 0, nil, eobError
				}
				fourth := remain[0]
				remain = remain[1:]
				if fourth >= 0x30 && fourth <= 0x39 {
					// NOTE(review): c is uint16, so `c<<16` is always 0 and
					// the result keeps only the third and fourth bytes.
					// Harmless for detection (four-byte chars never match
					// the commonChars table), but likely not the intent.
					c = c<<16 | uint16(third)<<8 | uint16(fourth)
					return
				}
			}
		}
		err = badCharError
	}
	return
}
+
// commonChars_gb_18030 lists frequently occurring GB-18030 double-byte
// character codes, sorted ascending for binarySearch.
var commonChars_gb_18030 = []uint16{
	0xa1a1, 0xa1a2, 0xa1a3, 0xa1a4, 0xa1b0, 0xa1b1, 0xa1f1, 0xa1f3, 0xa3a1, 0xa3ac,
	0xa3ba, 0xb1a8, 0xb1b8, 0xb1be, 0xb2bb, 0xb3c9, 0xb3f6, 0xb4f3, 0xb5bd, 0xb5c4,
	0xb5e3, 0xb6af, 0xb6d4, 0xb6e0, 0xb7a2, 0xb7a8, 0xb7bd, 0xb7d6, 0xb7dd, 0xb8b4,
	0xb8df, 0xb8f6, 0xb9ab, 0xb9c9, 0xb9d8, 0xb9fa, 0xb9fd, 0xbacd, 0xbba7, 0xbbd6,
	0xbbe1, 0xbbfa, 0xbcbc, 0xbcdb, 0xbcfe, 0xbdcc, 0xbecd, 0xbedd, 0xbfb4, 0xbfc6,
	0xbfc9, 0xc0b4, 0xc0ed, 0xc1cb, 0xc2db, 0xc3c7, 0xc4dc, 0xc4ea, 0xc5cc, 0xc6f7,
	0xc7f8, 0xc8ab, 0xc8cb, 0xc8d5, 0xc8e7, 0xc9cf, 0xc9fa, 0xcab1, 0xcab5, 0xcac7,
	0xcad0, 0xcad6, 0xcaf5, 0xcafd, 0xccec, 0xcdf8, 0xceaa, 0xcec4, 0xced2, 0xcee5,
	0xcfb5, 0xcfc2, 0xcfd6, 0xd0c2, 0xd0c5, 0xd0d0, 0xd0d4, 0xd1a7, 0xd2aa, 0xd2b2,
	0xd2b5, 0xd2bb, 0xd2d4, 0xd3c3, 0xd3d0, 0xd3fd, 0xd4c2, 0xd4da, 0xd5e2, 0xd6d0,
}
+
+func newRecognizer_gb_18030() *recognizerMultiByte {
+ return &recognizerMultiByte{
+ "GB-18030",
+ "zh",
+ charDecoder_gb_18030{},
+ commonChars_gb_18030,
+ }
+}
diff --git a/vendor/github.com/saintfish/chardet/recognizer.go b/vendor/github.com/saintfish/chardet/recognizer.go
new file mode 100644
index 000000000..1bf8461c3
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/recognizer.go
@@ -0,0 +1,83 @@
+package chardet
+
// recognizer is implemented by every charset recognizer: score input and
// report charset, language, and confidence.
type recognizer interface {
	Match(*recognizerInput) recognizerOutput
}

// recognizerOutput is the per-recognizer result; structurally identical
// to the public Result type.
type recognizerOutput Result
+
// recognizerInput is the preprocessed input shared by all recognizers.
type recognizerInput struct {
	raw         []byte // original input bytes, untouched
	input       []byte // analysis buffer: possibly tag-stripped, capped at 8 KiB
	tagStripped bool   // whether markup stripping was actually applied
	byteStats   []int  // 256-entry histogram of byte values in input
	hasC1Bytes  bool   // true when any byte in 0x80-0x9F occurs in input
}
+
+func newRecognizerInput(raw []byte, stripTag bool) *recognizerInput {
+ input, stripped := mayStripInput(raw, stripTag)
+ byteStats := computeByteStats(input)
+ return &recognizerInput{
+ raw: raw,
+ input: input,
+ tagStripped: stripped,
+ byteStats: byteStats,
+ hasC1Bytes: computeHasC1Bytes(byteStats),
+ }
+}
+
// mayStripInput prepares the analysis buffer. When stripTag is set it
// copies raw minus anything between '<' and '>' (up to 8 KiB); if the
// result looks unreliable — fewer than 5 tags, too many nested '<', or
// nearly everything stripped from a large input — it falls back to an
// unstripped prefix of raw. Returns the buffer and whether stripping was
// actually applied.
func mayStripInput(raw []byte, stripTag bool) (out []byte, stripped bool) {
	const inputBufferSize = 8192
	out = make([]byte, 0, inputBufferSize)
	var badTags, openTags int32
	inMarkup := false
	if stripTag {
		stripped = true
		for _, b := range raw {
			if b == '<' {
				if inMarkup {
					badTags++ // '<' seen inside a tag: malformed markup
				}
				inMarkup = true
				openTags++
			}
			if !inMarkup {
				if out = append(out, b); len(out) >= inputBufferSize {
					break
				}
			}
			if b == '>' {
				inMarkup = false
			}
		}
	}
	// Heuristic fallback: keep (a prefix of) the raw bytes when stripping
	// was skipped or not clearly beneficial.
	if openTags < 5 || openTags/5 < badTags || (len(out) < 100 && len(raw) > 600) {
		limit := len(raw)
		if limit > inputBufferSize {
			limit = inputBufferSize
		}
		out = append([]byte(nil), raw[:limit]...)
		stripped = false
	}
	return
}
+
// computeByteStats returns a 256-entry histogram of the byte values
// occurring in input.
func computeByteStats(input []byte) []int {
	hist := make([]int, 256)
	for _, b := range input {
		hist[b]++
	}
	return hist
}
+
// computeHasC1Bytes reports whether the histogram records any byte in
// the C1 control range 0x80-0x9F.
func computeHasC1Bytes(byteStats []int) bool {
	for b := 0x80; b <= 0x9F; b++ {
		if byteStats[b] > 0 {
			return true
		}
	}
	return false
}
diff --git a/vendor/github.com/saintfish/chardet/single_byte.go b/vendor/github.com/saintfish/chardet/single_byte.go
new file mode 100644
index 000000000..efe41c901
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/single_byte.go
@@ -0,0 +1,882 @@
+package chardet
+
// Recognizer for single byte charset family. It folds input bytes
// through charMap and scores the resulting 3-byte ngrams against a
// 64-entry language table.
type recognizerSingleByte struct {
	charset          string       // IANA charset name reported on match
	hasC1ByteCharset string       // alternative name reported when input contains C1 bytes (e.g. windows-1252); may be empty
	language         string       // IANA language code of the ngram model
	charMap          *[256]byte   // byte folding table; 0 means "drop this byte"
	ngram            *[64]uint32  // sorted ngram table for the language
}
+
+func (r *recognizerSingleByte) Match(input *recognizerInput) recognizerOutput {
+ var charset string = r.charset
+ if input.hasC1Bytes && len(r.hasC1ByteCharset) > 0 {
+ charset = r.hasC1ByteCharset
+ }
+ return recognizerOutput{
+ Charset: charset,
+ Language: r.language,
+ Confidence: r.parseNgram(input.input),
+ }
+}
+
// ngramState maintains a rolling 3-byte ngram over charMap-folded input
// and counts how many of the ngrams seen occur in a 64-entry language
// frequency table.
type ngramState struct {
	ngram       uint32 // rolling window holding the last 3 input bytes
	ignoreSpace bool   // set right after a space so runs of spaces collapse to one
	// ngramCount is the number of ngrams examined; ngramHit is how many
	// of them were found in table.
	ngramCount, ngramHit uint32
	table *[64]uint32 // ngram table, sorted ascending; binary-searched by lookup
}

// newNgramState returns a zeroed ngram scorer over the given table.
func newNgramState(table *[64]uint32) *ngramState {
	return &ngramState{
		ngram:       0,
		ignoreSpace: false,
		ngramCount:  0,
		ngramHit:    0,
		table:       table,
	}
}

// AddByte shifts b into the rolling ngram, counts it, and records a hit
// when the resulting ngram appears in the language table. A space that
// immediately follows another space is skipped entirely, so whitespace
// runs contribute only one ngram.
func (s *ngramState) AddByte(b byte) {
	const ngramMask = 0xFFFFFF // keep only the low 3 bytes
	if !(b == 0x20 && s.ignoreSpace) {
		s.ngram = ((s.ngram << 8) | uint32(b)) & ngramMask
		s.ngramCount++
		if s.lookup() {
			s.ngramHit++
		}
	}
	// Fix: the original also assigned s.ignoreSpace inside the branch
	// above (`s.ignoreSpace = (s.ngram == 0x20)`); that store was dead —
	// unconditionally overwritten here with no intervening read — and has
	// been removed.
	s.ignoreSpace = (b == 0x20)
}

// HitRate returns the fraction of examined ngrams found in the table.
func (s *ngramState) HitRate() float32 {
	if s.ngramCount == 0 {
		return 0
	}
	return float32(s.ngramHit) / float32(s.ngramCount)
}

// lookup reports whether the current ngram is present in the sorted
// 64-entry table, using a fully unrolled binary search.
func (s *ngramState) lookup() bool {
	var index int
	if s.table[index+32] <= s.ngram {
		index += 32
	}
	if s.table[index+16] <= s.ngram {
		index += 16
	}
	if s.table[index+8] <= s.ngram {
		index += 8
	}
	if s.table[index+4] <= s.ngram {
		index += 4
	}
	if s.table[index+2] <= s.ngram {
		index += 2
	}
	if s.table[index+1] <= s.ngram {
		index += 1
	}
	if s.table[index] > s.ngram {
		index -= 1
	}
	if index < 0 || s.table[index] != s.ngram {
		return false
	}
	return true
}
+
+func (r *recognizerSingleByte) parseNgram(input []byte) int {
+ state := newNgramState(r.ngram)
+ for _, inChar := range input {
+ c := r.charMap[inChar]
+ if c != 0 {
+ state.AddByte(c)
+ }
+ }
+ state.AddByte(0x20)
+ rate := state.HitRate()
+ if rate > 0.33 {
+ return 98
+ }
+ return int(rate * 300)
+}
+
// charMap_8859_1 folds ISO-8859-1 bytes for ngram matching: letters
// (including accented ones) map to their lowercase form, the apostrophe
// (0x27) maps to 0 and is dropped, and everything else collapses to
// space (0x20).
var charMap_8859_1 = [256]byte{
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
	0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
	0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
	0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
	0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
	0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
	0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
	0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
}
+
// Language ngram tables for the ISO-8859-1 family. Each table holds 64
// characteristic 3-byte sequences (after charMap folding) for one
// language, sorted ascending as required by ngramState.lookup's binary
// search.
var ngrams_8859_1_en = [64]uint32{
	0x206120, 0x20616E, 0x206265, 0x20636F, 0x20666F, 0x206861, 0x206865, 0x20696E, 0x206D61, 0x206F66, 0x207072, 0x207265, 0x207361, 0x207374, 0x207468, 0x20746F,
	0x207768, 0x616964, 0x616C20, 0x616E20, 0x616E64, 0x617320, 0x617420, 0x617465, 0x617469, 0x642061, 0x642074, 0x652061, 0x652073, 0x652074, 0x656420, 0x656E74,
	0x657220, 0x657320, 0x666F72, 0x686174, 0x686520, 0x686572, 0x696420, 0x696E20, 0x696E67, 0x696F6E, 0x697320, 0x6E2061, 0x6E2074, 0x6E6420, 0x6E6720, 0x6E7420,
	0x6F6620, 0x6F6E20, 0x6F7220, 0x726520, 0x727320, 0x732061, 0x732074, 0x736169, 0x737420, 0x742074, 0x746572, 0x746861, 0x746865, 0x74696F, 0x746F20, 0x747320,
}

var ngrams_8859_1_da = [64]uint32{
	0x206166, 0x206174, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207369, 0x207374, 0x207469, 0x207669, 0x616620,
	0x616E20, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646572, 0x646574, 0x652073, 0x656420, 0x656465, 0x656E20, 0x656E64, 0x657220, 0x657265, 0x657320,
	0x657420, 0x666F72, 0x676520, 0x67656E, 0x676572, 0x696765, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6572, 0x6C6967, 0x6C6C65, 0x6D6564, 0x6E6465, 0x6E6520,
	0x6E6720, 0x6E6765, 0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722064, 0x722065, 0x722073, 0x726520, 0x737465, 0x742073, 0x746520, 0x746572, 0x74696C, 0x766572,
}

var ngrams_8859_1_de = [64]uint32{
	0x20616E, 0x206175, 0x206265, 0x206461, 0x206465, 0x206469, 0x206569, 0x206765, 0x206861, 0x20696E, 0x206D69, 0x207363, 0x207365, 0x20756E, 0x207665, 0x20766F,
	0x207765, 0x207A75, 0x626572, 0x636820, 0x636865, 0x636874, 0x646173, 0x64656E, 0x646572, 0x646965, 0x652064, 0x652073, 0x65696E, 0x656974, 0x656E20, 0x657220,
	0x657320, 0x67656E, 0x68656E, 0x687420, 0x696368, 0x696520, 0x696E20, 0x696E65, 0x697420, 0x6C6963, 0x6C6C65, 0x6E2061, 0x6E2064, 0x6E2073, 0x6E6420, 0x6E6465,
	0x6E6520, 0x6E6720, 0x6E6765, 0x6E7465, 0x722064, 0x726465, 0x726569, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x756E64, 0x756E67, 0x766572,
}

var ngrams_8859_1_es = [64]uint32{
	0x206120, 0x206361, 0x20636F, 0x206465, 0x20656C, 0x20656E, 0x206573, 0x20696E, 0x206C61, 0x206C6F, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
	0x20756E, 0x207920, 0x612063, 0x612064, 0x612065, 0x61206C, 0x612070, 0x616369, 0x61646F, 0x616C20, 0x617220, 0x617320, 0x6369F3, 0x636F6E, 0x646520, 0x64656C,
	0x646F20, 0x652064, 0x652065, 0x65206C, 0x656C20, 0x656E20, 0x656E74, 0x657320, 0x657374, 0x69656E, 0x69F36E, 0x6C6120, 0x6C6F73, 0x6E2065, 0x6E7465, 0x6F2064,
	0x6F2065, 0x6F6E20, 0x6F7220, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732064, 0x732065, 0x732070, 0x736520, 0x746520, 0x746F20, 0x756520, 0xF36E20,
}

var ngrams_8859_1_fr = [64]uint32{
	0x206175, 0x20636F, 0x206461, 0x206465, 0x206475, 0x20656E, 0x206574, 0x206C61, 0x206C65, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207365, 0x20736F, 0x20756E,
	0x20E020, 0x616E74, 0x617469, 0x636520, 0x636F6E, 0x646520, 0x646573, 0x647520, 0x652061, 0x652063, 0x652064, 0x652065, 0x65206C, 0x652070, 0x652073, 0x656E20,
	0x656E74, 0x657220, 0x657320, 0x657420, 0x657572, 0x696F6E, 0x697320, 0x697420, 0x6C6120, 0x6C6520, 0x6C6573, 0x6D656E, 0x6E2064, 0x6E6520, 0x6E7320, 0x6E7420,
	0x6F6E20, 0x6F6E74, 0x6F7572, 0x717565, 0x72206C, 0x726520, 0x732061, 0x732064, 0x732065, 0x73206C, 0x732070, 0x742064, 0x746520, 0x74696F, 0x756520, 0x757220,
}

var ngrams_8859_1_it = [64]uint32{
	0x20616C, 0x206368, 0x20636F, 0x206465, 0x206469, 0x206520, 0x20696C, 0x20696E, 0x206C61, 0x207065, 0x207072, 0x20756E, 0x612063, 0x612064, 0x612070, 0x612073,
	0x61746F, 0x636865, 0x636F6E, 0x64656C, 0x646920, 0x652061, 0x652063, 0x652064, 0x652069, 0x65206C, 0x652070, 0x652073, 0x656C20, 0x656C6C, 0x656E74, 0x657220,
	0x686520, 0x692061, 0x692063, 0x692064, 0x692073, 0x696120, 0x696C20, 0x696E20, 0x696F6E, 0x6C6120, 0x6C6520, 0x6C6920, 0x6C6C61, 0x6E6520, 0x6E6920, 0x6E6F20,
	0x6E7465, 0x6F2061, 0x6F2064, 0x6F2069, 0x6F2073, 0x6F6E20, 0x6F6E65, 0x706572, 0x726120, 0x726520, 0x736920, 0x746120, 0x746520, 0x746920, 0x746F20, 0x7A696F,
}

var ngrams_8859_1_nl = [64]uint32{
	0x20616C, 0x206265, 0x206461, 0x206465, 0x206469, 0x206565, 0x20656E, 0x206765, 0x206865, 0x20696E, 0x206D61, 0x206D65, 0x206F70, 0x207465, 0x207661, 0x207665,
	0x20766F, 0x207765, 0x207A69, 0x61616E, 0x616172, 0x616E20, 0x616E64, 0x617220, 0x617420, 0x636874, 0x646520, 0x64656E, 0x646572, 0x652062, 0x652076, 0x65656E,
	0x656572, 0x656E20, 0x657220, 0x657273, 0x657420, 0x67656E, 0x686574, 0x696520, 0x696E20, 0x696E67, 0x697320, 0x6E2062, 0x6E2064, 0x6E2065, 0x6E2068, 0x6E206F,
	0x6E2076, 0x6E6465, 0x6E6720, 0x6F6E64, 0x6F6F72, 0x6F7020, 0x6F7220, 0x736368, 0x737465, 0x742064, 0x746520, 0x74656E, 0x746572, 0x76616E, 0x766572, 0x766F6F,
}

var ngrams_8859_1_no = [64]uint32{
	0x206174, 0x206176, 0x206465, 0x20656E, 0x206572, 0x20666F, 0x206861, 0x206920, 0x206D65, 0x206F67, 0x2070E5, 0x207365, 0x20736B, 0x20736F, 0x207374, 0x207469,
	0x207669, 0x20E520, 0x616E64, 0x617220, 0x617420, 0x646520, 0x64656E, 0x646574, 0x652073, 0x656420, 0x656E20, 0x656E65, 0x657220, 0x657265, 0x657420, 0x657474,
	0x666F72, 0x67656E, 0x696B6B, 0x696C20, 0x696E67, 0x6B6520, 0x6B6B65, 0x6C6520, 0x6C6C65, 0x6D6564, 0x6D656E, 0x6E2073, 0x6E6520, 0x6E6720, 0x6E6765, 0x6E6E65,
	0x6F6720, 0x6F6D20, 0x6F7220, 0x70E520, 0x722073, 0x726520, 0x736F6D, 0x737465, 0x742073, 0x746520, 0x74656E, 0x746572, 0x74696C, 0x747420, 0x747465, 0x766572,
}

var ngrams_8859_1_pt = [64]uint32{
	0x206120, 0x20636F, 0x206461, 0x206465, 0x20646F, 0x206520, 0x206573, 0x206D61, 0x206E6F, 0x206F20, 0x207061, 0x20706F, 0x207072, 0x207175, 0x207265, 0x207365,
	0x20756D, 0x612061, 0x612063, 0x612064, 0x612070, 0x616465, 0x61646F, 0x616C20, 0x617220, 0x617261, 0x617320, 0x636F6D, 0x636F6E, 0x646120, 0x646520, 0x646F20,
	0x646F73, 0x652061, 0x652064, 0x656D20, 0x656E74, 0x657320, 0x657374, 0x696120, 0x696361, 0x6D656E, 0x6E7465, 0x6E746F, 0x6F2061, 0x6F2063, 0x6F2064, 0x6F2065,
	0x6F2070, 0x6F7320, 0x706172, 0x717565, 0x726120, 0x726573, 0x732061, 0x732064, 0x732065, 0x732070, 0x737461, 0x746520, 0x746F20, 0x756520, 0xE36F20, 0xE7E36F,
}

var ngrams_8859_1_sv = [64]uint32{
	0x206174, 0x206176, 0x206465, 0x20656E, 0x2066F6, 0x206861, 0x206920, 0x20696E, 0x206B6F, 0x206D65, 0x206F63, 0x2070E5, 0x20736B, 0x20736F, 0x207374, 0x207469,
	0x207661, 0x207669, 0x20E472, 0x616465, 0x616E20, 0x616E64, 0x617220, 0x617474, 0x636820, 0x646520, 0x64656E, 0x646572, 0x646574, 0x656420, 0x656E20, 0x657220,
	0x657420, 0x66F672, 0x67656E, 0x696C6C, 0x696E67, 0x6B6120, 0x6C6C20, 0x6D6564, 0x6E2073, 0x6E6120, 0x6E6465, 0x6E6720, 0x6E6765, 0x6E696E, 0x6F6368, 0x6F6D20,
	0x6F6E20, 0x70E520, 0x722061, 0x722073, 0x726120, 0x736B61, 0x736F6D, 0x742073, 0x746120, 0x746520, 0x746572, 0x74696C, 0x747420, 0x766172, 0xE47220, 0xF67220,
}
+
+func newRecognizer_8859_1(language string, ngram *[64]uint32) *recognizerSingleByte {
+ return &recognizerSingleByte{
+ charset: "ISO-8859-1",
+ hasC1ByteCharset: "windows-1252",
+ language: language,
+ charMap: &charMap_8859_1,
+ ngram: ngram,
+ }
+}
+
// Per-language constructors for the ISO-8859-1 family; each pairs the
// shared charMap with one language's ngram table.
func newRecognizer_8859_1_en() *recognizerSingleByte {
	return newRecognizer_8859_1("en", &ngrams_8859_1_en)
}
func newRecognizer_8859_1_da() *recognizerSingleByte {
	return newRecognizer_8859_1("da", &ngrams_8859_1_da)
}
func newRecognizer_8859_1_de() *recognizerSingleByte {
	return newRecognizer_8859_1("de", &ngrams_8859_1_de)
}
func newRecognizer_8859_1_es() *recognizerSingleByte {
	return newRecognizer_8859_1("es", &ngrams_8859_1_es)
}
func newRecognizer_8859_1_fr() *recognizerSingleByte {
	return newRecognizer_8859_1("fr", &ngrams_8859_1_fr)
}
func newRecognizer_8859_1_it() *recognizerSingleByte {
	return newRecognizer_8859_1("it", &ngrams_8859_1_it)
}
func newRecognizer_8859_1_nl() *recognizerSingleByte {
	return newRecognizer_8859_1("nl", &ngrams_8859_1_nl)
}
func newRecognizer_8859_1_no() *recognizerSingleByte {
	return newRecognizer_8859_1("no", &ngrams_8859_1_no)
}
func newRecognizer_8859_1_pt() *recognizerSingleByte {
	return newRecognizer_8859_1("pt", &ngrams_8859_1_pt)
}
func newRecognizer_8859_1_sv() *recognizerSingleByte {
	return newRecognizer_8859_1("sv", &ngrams_8859_1_sv)
}
+
+var charMap_8859_2 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0x20,
+ 0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
+ 0x20, 0xB1, 0x20, 0xB3, 0x20, 0xB5, 0xB6, 0xB7,
+ 0x20, 0xB9, 0xBA, 0xBB, 0xBC, 0x20, 0xBE, 0xBF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
+}
+
+var ngrams_8859_2_cs = [64]uint32{
+ 0x206120, 0x206279, 0x20646F, 0x206A65, 0x206E61, 0x206E65, 0x206F20, 0x206F64, 0x20706F, 0x207072, 0x2070F8, 0x20726F, 0x207365, 0x20736F, 0x207374, 0x20746F,
+ 0x207620, 0x207679, 0x207A61, 0x612070, 0x636520, 0x636820, 0x652070, 0x652073, 0x652076, 0x656D20, 0x656EED, 0x686F20, 0x686F64, 0x697374, 0x6A6520, 0x6B7465,
+ 0x6C6520, 0x6C6920, 0x6E6120, 0x6EE920, 0x6EEC20, 0x6EED20, 0x6F2070, 0x6F646E, 0x6F6A69, 0x6F7374, 0x6F7520, 0x6F7661, 0x706F64, 0x706F6A, 0x70726F, 0x70F865,
+ 0x736520, 0x736F75, 0x737461, 0x737469, 0x73746E, 0x746572, 0x746EED, 0x746F20, 0x752070, 0xBE6520, 0xE16EED, 0xE9686F, 0xED2070, 0xED2073, 0xED6D20, 0xF86564,
+}
+
+var ngrams_8859_2_hu = [64]uint32{
+ 0x206120, 0x20617A, 0x206265, 0x206567, 0x20656C, 0x206665, 0x206861, 0x20686F, 0x206973, 0x206B65, 0x206B69, 0x206BF6, 0x206C65, 0x206D61, 0x206D65, 0x206D69,
+ 0x206E65, 0x20737A, 0x207465, 0x20E973, 0x612061, 0x61206B, 0x61206D, 0x612073, 0x616B20, 0x616E20, 0x617A20, 0x62616E, 0x62656E, 0x656779, 0x656B20, 0x656C20,
+ 0x656C65, 0x656D20, 0x656E20, 0x657265, 0x657420, 0x657465, 0x657474, 0x677920, 0x686F67, 0x696E74, 0x697320, 0x6B2061, 0x6BF67A, 0x6D6567, 0x6D696E, 0x6E2061,
+ 0x6E616B, 0x6E656B, 0x6E656D, 0x6E7420, 0x6F6779, 0x732061, 0x737A65, 0x737A74, 0x737AE1, 0x73E967, 0x742061, 0x747420, 0x74E173, 0x7A6572, 0xE16E20, 0xE97320,
+}
+
+var ngrams_8859_2_pl = [64]uint32{
+ 0x20637A, 0x20646F, 0x206920, 0x206A65, 0x206B6F, 0x206D61, 0x206D69, 0x206E61, 0x206E69, 0x206F64, 0x20706F, 0x207072, 0x207369, 0x207720, 0x207769, 0x207779,
+ 0x207A20, 0x207A61, 0x612070, 0x612077, 0x616E69, 0x636820, 0x637A65, 0x637A79, 0x646F20, 0x647A69, 0x652070, 0x652073, 0x652077, 0x65207A, 0x65676F, 0x656A20,
+ 0x656D20, 0x656E69, 0x676F20, 0x696120, 0x696520, 0x69656A, 0x6B6120, 0x6B6920, 0x6B6965, 0x6D6965, 0x6E6120, 0x6E6961, 0x6E6965, 0x6F2070, 0x6F7761, 0x6F7769,
+ 0x706F6C, 0x707261, 0x70726F, 0x70727A, 0x727A65, 0x727A79, 0x7369EA, 0x736B69, 0x737461, 0x776965, 0x796368, 0x796D20, 0x7A6520, 0x7A6965, 0x7A7920, 0xF37720,
+}
+
+var ngrams_8859_2_ro = [64]uint32{
+ 0x206120, 0x206163, 0x206361, 0x206365, 0x20636F, 0x206375, 0x206465, 0x206469, 0x206C61, 0x206D61, 0x207065, 0x207072, 0x207365, 0x2073E3, 0x20756E, 0x20BA69,
+ 0x20EE6E, 0x612063, 0x612064, 0x617265, 0x617420, 0x617465, 0x617520, 0x636172, 0x636F6E, 0x637520, 0x63E320, 0x646520, 0x652061, 0x652063, 0x652064, 0x652070,
+ 0x652073, 0x656120, 0x656920, 0x656C65, 0x656E74, 0x657374, 0x692061, 0x692063, 0x692064, 0x692070, 0x696520, 0x696920, 0x696E20, 0x6C6120, 0x6C6520, 0x6C6F72,
+ 0x6C7569, 0x6E6520, 0x6E7472, 0x6F7220, 0x70656E, 0x726520, 0x726561, 0x727520, 0x73E320, 0x746520, 0x747275, 0x74E320, 0x756920, 0x756C20, 0xBA6920, 0xEE6E20,
+}
+
+// newRecognizer_8859_2 builds a single-byte recognizer for ISO-8859-2
+// (Latin-2, Central European) text in the given language, using the shared
+// Latin-2 char map and the supplied 64-entry n-gram table. hasC1ByteCharset
+// names the charset reported instead (windows-1250) when C1-range bytes
+// occur — presumably applied in recognizerSingleByte; confirm there.
+func newRecognizer_8859_2(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:          "ISO-8859-2",
+		hasC1ByteCharset: "windows-1250",
+		language:         language,
+		charMap:          &charMap_8859_2,
+		ngram:            ngram,
+	}
+}
+
+// Per-language ISO-8859-2 recognizer constructors (cs, hu, pl, ro).
+// Fix: these previously delegated to newRecognizer_8859_1, which paired the
+// Latin-2 n-gram tables with the ISO-8859-1 char map and reported charset
+// "ISO-8859-1" for Czech/Hungarian/Polish/Romanian input. They must route
+// through newRecognizer_8859_2, matching the delegation pattern every other
+// charset family in this file follows.
+func newRecognizer_8859_2_cs() *recognizerSingleByte {
+	return newRecognizer_8859_2("cs", &ngrams_8859_2_cs)
+}
+func newRecognizer_8859_2_hu() *recognizerSingleByte {
+	return newRecognizer_8859_2("hu", &ngrams_8859_2_hu)
+}
+func newRecognizer_8859_2_pl() *recognizerSingleByte {
+	return newRecognizer_8859_2("pl", &ngrams_8859_2_pl)
+}
+func newRecognizer_8859_2_ro() *recognizerSingleByte {
+	return newRecognizer_8859_2("ro", &ngrams_8859_2_ro)
+}
+
+var charMap_8859_5 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0x20, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x20, 0xFE, 0xFF,
+}
+
+var ngrams_8859_5_ru = [64]uint32{
+ 0x20D220, 0x20D2DE, 0x20D4DE, 0x20D7D0, 0x20D820, 0x20DAD0, 0x20DADE, 0x20DDD0, 0x20DDD5, 0x20DED1, 0x20DFDE, 0x20DFE0, 0x20E0D0, 0x20E1DE, 0x20E1E2, 0x20E2DE,
+ 0x20E7E2, 0x20EDE2, 0xD0DDD8, 0xD0E2EC, 0xD3DE20, 0xD5DBEC, 0xD5DDD8, 0xD5E1E2, 0xD5E220, 0xD820DF, 0xD8D520, 0xD8D820, 0xD8EF20, 0xDBD5DD, 0xDBD820, 0xDBECDD,
+ 0xDDD020, 0xDDD520, 0xDDD8D5, 0xDDD8EF, 0xDDDE20, 0xDDDED2, 0xDE20D2, 0xDE20DF, 0xDE20E1, 0xDED220, 0xDED2D0, 0xDED3DE, 0xDED920, 0xDEDBEC, 0xDEDC20, 0xDEE1E2,
+ 0xDFDEDB, 0xDFE0D5, 0xDFE0D8, 0xDFE0DE, 0xE0D0D2, 0xE0D5D4, 0xE1E2D0, 0xE1E2D2, 0xE1E2D8, 0xE1EF20, 0xE2D5DB, 0xE2DE20, 0xE2DEE0, 0xE2EC20, 0xE7E2DE, 0xEBE520,
+}
+
+// newRecognizer_8859_5 builds a single-byte recognizer for ISO-8859-5
+// (Cyrillic) text in the given language. Unlike the Latin families there is
+// no hasC1ByteCharset fallback here.
+func newRecognizer_8859_5(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  "ISO-8859-5",
+		language: language,
+		charMap:  &charMap_8859_5,
+		ngram:    ngram,
+	}
+}
+
+// newRecognizer_8859_5_ru builds the Russian ISO-8859-5 recognizer.
+func newRecognizer_8859_5_ru() *recognizerSingleByte {
+	return newRecognizer_8859_5("ru", &ngrams_8859_5_ru)
+}
+
+var charMap_8859_6 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+}
+
+var ngrams_8859_6_ar = [64]uint32{
+ 0x20C7E4, 0x20C7E6, 0x20C8C7, 0x20D9E4, 0x20E1EA, 0x20E4E4, 0x20E5E6, 0x20E8C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E420, 0xC7E4C3, 0xC7E4C7, 0xC7E4C8,
+ 0xC7E4CA, 0xC7E4CC, 0xC7E4CD, 0xC7E4CF, 0xC7E4D3, 0xC7E4D9, 0xC7E4E2, 0xC7E4E5, 0xC7E4E8, 0xC7E4EA, 0xC7E520, 0xC7E620, 0xC7E6CA, 0xC820C7, 0xC920C7, 0xC920E1,
+ 0xC920E4, 0xC920E5, 0xC920E8, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xD920C7, 0xD9E4E9, 0xE1EA20, 0xE420C7, 0xE4C920, 0xE4E920, 0xE4EA20,
+ 0xE520C7, 0xE5C720, 0xE5C920, 0xE5E620, 0xE620C7, 0xE720C7, 0xE7C720, 0xE8C7E4, 0xE8E620, 0xE920C7, 0xEA20C7, 0xEA20E5, 0xEA20E8, 0xEAC920, 0xEAD120, 0xEAE620,
+}
+
+// newRecognizer_8859_6 builds a single-byte recognizer for ISO-8859-6
+// (Arabic) text in the given language.
+func newRecognizer_8859_6(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  "ISO-8859-6",
+		language: language,
+		charMap:  &charMap_8859_6,
+		ngram:    ngram,
+	}
+}
+
+// newRecognizer_8859_6_ar builds the Arabic ISO-8859-6 recognizer.
+func newRecognizer_8859_6_ar() *recognizerSingleByte {
+	return newRecognizer_8859_6("ar", &ngrams_8859_6_ar)
+}
+
+var charMap_8859_7 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0xA1, 0xA2, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xDC, 0x20,
+ 0xDD, 0xDE, 0xDF, 0x20, 0xFC, 0x20, 0xFD, 0xFE,
+ 0xC0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0x20, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0x20,
+}
+
+var ngrams_8859_7_el = [64]uint32{
+ 0x20E1ED, 0x20E1F0, 0x20E3E9, 0x20E4E9, 0x20E5F0, 0x20E720, 0x20EAE1, 0x20ECE5, 0x20EDE1, 0x20EF20, 0x20F0E1, 0x20F0EF, 0x20F0F1, 0x20F3F4, 0x20F3F5, 0x20F4E7,
+ 0x20F4EF, 0xDFE120, 0xE120E1, 0xE120F4, 0xE1E920, 0xE1ED20, 0xE1F0FC, 0xE1F220, 0xE3E9E1, 0xE5E920, 0xE5F220, 0xE720F4, 0xE7ED20, 0xE7F220, 0xE920F4, 0xE9E120,
+ 0xE9EADE, 0xE9F220, 0xEAE1E9, 0xEAE1F4, 0xECE520, 0xED20E1, 0xED20E5, 0xED20F0, 0xEDE120, 0xEFF220, 0xEFF520, 0xF0EFF5, 0xF0F1EF, 0xF0FC20, 0xF220E1, 0xF220E5,
+ 0xF220EA, 0xF220F0, 0xF220F4, 0xF3E520, 0xF3E720, 0xF3F4EF, 0xF4E120, 0xF4E1E9, 0xF4E7ED, 0xF4E7F2, 0xF4E9EA, 0xF4EF20, 0xF4EFF5, 0xF4F9ED, 0xF9ED20, 0xFEED20,
+}
+
+// newRecognizer_8859_7 builds a single-byte recognizer for ISO-8859-7
+// (Greek) text in the given language. hasC1ByteCharset names the charset
+// reported instead (windows-1253) when C1-range bytes occur.
+func newRecognizer_8859_7(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:          "ISO-8859-7",
+		hasC1ByteCharset: "windows-1253",
+		language:         language,
+		charMap:          &charMap_8859_7,
+		ngram:            ngram,
+	}
+}
+
+// newRecognizer_8859_7_el builds the Greek ISO-8859-7 recognizer.
+func newRecognizer_8859_7_el() *recognizerSingleByte {
+	return newRecognizer_8859_7("el", &ngrams_8859_7_el)
+}
+
+var charMap_8859_8 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0x20, 0x20, 0x20, 0x20, 0x20,
+}
+
+var ngrams_8859_8_I_he = [64]uint32{
+ 0x20E0E5, 0x20E0E7, 0x20E0E9, 0x20E0FA, 0x20E1E9, 0x20E1EE, 0x20E4E0, 0x20E4E5, 0x20E4E9, 0x20E4EE, 0x20E4F2, 0x20E4F9, 0x20E4FA, 0x20ECE0, 0x20ECE4, 0x20EEE0,
+ 0x20F2EC, 0x20F9EC, 0xE0FA20, 0xE420E0, 0xE420E1, 0xE420E4, 0xE420EC, 0xE420EE, 0xE420F9, 0xE4E5E0, 0xE5E020, 0xE5ED20, 0xE5EF20, 0xE5F820, 0xE5FA20, 0xE920E4,
+ 0xE9E420, 0xE9E5FA, 0xE9E9ED, 0xE9ED20, 0xE9EF20, 0xE9F820, 0xE9FA20, 0xEC20E0, 0xEC20E4, 0xECE020, 0xECE420, 0xED20E0, 0xED20E1, 0xED20E4, 0xED20EC, 0xED20EE,
+ 0xED20F9, 0xEEE420, 0xEF20E4, 0xF0E420, 0xF0E920, 0xF0E9ED, 0xF2EC20, 0xF820E4, 0xF8E9ED, 0xF9EC20, 0xFA20E0, 0xFA20E1, 0xFA20E4, 0xFA20EC, 0xFA20EE, 0xFA20F9,
+}
+
+var ngrams_8859_8_he = [64]uint32{
+ 0x20E0E5, 0x20E0EC, 0x20E4E9, 0x20E4EC, 0x20E4EE, 0x20E4F0, 0x20E9F0, 0x20ECF2, 0x20ECF9, 0x20EDE5, 0x20EDE9, 0x20EFE5, 0x20EFE9, 0x20F8E5, 0x20F8E9, 0x20FAE0,
+ 0x20FAE5, 0x20FAE9, 0xE020E4, 0xE020EC, 0xE020ED, 0xE020FA, 0xE0E420, 0xE0E5E4, 0xE0EC20, 0xE0EE20, 0xE120E4, 0xE120ED, 0xE120FA, 0xE420E4, 0xE420E9, 0xE420EC,
+ 0xE420ED, 0xE420EF, 0xE420F8, 0xE420FA, 0xE4EC20, 0xE5E020, 0xE5E420, 0xE7E020, 0xE9E020, 0xE9E120, 0xE9E420, 0xEC20E4, 0xEC20ED, 0xEC20FA, 0xECF220, 0xECF920,
+ 0xEDE9E9, 0xEDE9F0, 0xEDE9F8, 0xEE20E4, 0xEE20ED, 0xEE20FA, 0xEEE120, 0xEEE420, 0xF2E420, 0xF920E4, 0xF920ED, 0xF920FA, 0xF9E420, 0xFAE020, 0xFAE420, 0xFAE5E9,
+}
+
+// newRecognizer_8859_8 builds a single-byte recognizer for ISO-8859-8
+// (Hebrew) text. hasC1ByteCharset names the charset reported instead
+// (windows-1255) when C1-range bytes occur.
+func newRecognizer_8859_8(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:          "ISO-8859-8",
+		hasC1ByteCharset: "windows-1255",
+		language:         language,
+		charMap:          &charMap_8859_8,
+		ngram:            ngram,
+	}
+}
+
+// newRecognizer_8859_8_I_he builds the Hebrew recognizer for the "-I"
+// variant: same char map, but a distinct n-gram table and the charset name
+// overridden to "ISO-8859-8-I" after construction.
+func newRecognizer_8859_8_I_he() *recognizerSingleByte {
+	r := newRecognizer_8859_8("he", &ngrams_8859_8_I_he)
+	r.charset = "ISO-8859-8-I"
+	return r
+}
+
+// newRecognizer_8859_8_he builds the plain ISO-8859-8 Hebrew recognizer.
+func newRecognizer_8859_8_he() *recognizerSingleByte {
+	return newRecognizer_8859_8("he", &ngrams_8859_8_he)
+}
+
+var charMap_8859_9 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
+ 0x20, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0x69, 0xFE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0x20,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
+}
+
+var ngrams_8859_9_tr = [64]uint32{
+ 0x206261, 0x206269, 0x206275, 0x206461, 0x206465, 0x206765, 0x206861, 0x20696C, 0x206B61, 0x206B6F, 0x206D61, 0x206F6C, 0x207361, 0x207461, 0x207665, 0x207961,
+ 0x612062, 0x616B20, 0x616C61, 0x616D61, 0x616E20, 0x616EFD, 0x617220, 0x617261, 0x6172FD, 0x6173FD, 0x617961, 0x626972, 0x646120, 0x646520, 0x646920, 0x652062,
+ 0x65206B, 0x656469, 0x656E20, 0x657220, 0x657269, 0x657369, 0x696C65, 0x696E20, 0x696E69, 0x697220, 0x6C616E, 0x6C6172, 0x6C6520, 0x6C6572, 0x6E2061, 0x6E2062,
+ 0x6E206B, 0x6E6461, 0x6E6465, 0x6E6520, 0x6E6920, 0x6E696E, 0x6EFD20, 0x72696E, 0x72FD6E, 0x766520, 0x796120, 0x796F72, 0xFD6E20, 0xFD6E64, 0xFD6EFD, 0xFDF0FD,
+}
+
+// newRecognizer_8859_9 builds a single-byte recognizer for ISO-8859-9
+// (Latin-5, Turkish) text in the given language. hasC1ByteCharset names the
+// charset reported instead (windows-1254) when C1-range bytes occur.
+func newRecognizer_8859_9(language string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:          "ISO-8859-9",
+		hasC1ByteCharset: "windows-1254",
+		language:         language,
+		charMap:          &charMap_8859_9,
+		ngram:            ngram,
+	}
+}
+
+// newRecognizer_8859_9_tr builds the Turkish ISO-8859-9 recognizer.
+func newRecognizer_8859_9_tr() *recognizerSingleByte {
+	return newRecognizer_8859_9("tr", &ngrams_8859_9_tr)
+}
+
+var charMap_windows_1256 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x81, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
+ 0x88, 0x20, 0x8A, 0x20, 0x9C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x98, 0x20, 0x9A, 0x20, 0x9C, 0x20, 0x20, 0x9F,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0xAA, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0xB5, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0x20,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0x20, 0x20, 0x20, 0x20, 0xF4, 0x20, 0x20, 0x20,
+ 0x20, 0xF9, 0x20, 0xFB, 0xFC, 0x20, 0x20, 0xFF,
+}
+
+var ngrams_windows_1256 = [64]uint32{
+ 0x20C7E1, 0x20C7E4, 0x20C8C7, 0x20DAE1, 0x20DDED, 0x20E1E1, 0x20E3E4, 0x20E6C7, 0xC720C7, 0xC7C120, 0xC7CA20, 0xC7D120, 0xC7E120, 0xC7E1C3, 0xC7E1C7, 0xC7E1C8,
+ 0xC7E1CA, 0xC7E1CC, 0xC7E1CD, 0xC7E1CF, 0xC7E1D3, 0xC7E1DA, 0xC7E1DE, 0xC7E1E3, 0xC7E1E6, 0xC7E1ED, 0xC7E320, 0xC7E420, 0xC7E4CA, 0xC820C7, 0xC920C7, 0xC920DD,
+ 0xC920E1, 0xC920E3, 0xC920E6, 0xCA20C7, 0xCF20C7, 0xCFC920, 0xD120C7, 0xD1C920, 0xD320C7, 0xDA20C7, 0xDAE1EC, 0xDDED20, 0xE120C7, 0xE1C920, 0xE1EC20, 0xE1ED20,
+ 0xE320C7, 0xE3C720, 0xE3C920, 0xE3E420, 0xE420C7, 0xE520C7, 0xE5C720, 0xE6C7E1, 0xE6E420, 0xEC20C7, 0xED20C7, 0xED20E3, 0xED20E6, 0xEDC920, 0xEDD120, 0xEDE420,
+}
+
+// newRecognizer_windows_1256 builds the recognizer for Arabic text in the
+// windows-1256 code page.
+func newRecognizer_windows_1256() *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  "windows-1256",
+		language: "ar",
+		charMap:  &charMap_windows_1256,
+		ngram:    &ngrams_windows_1256,
+	}
+}
+
+var charMap_windows_1251 = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x90, 0x83, 0x20, 0x83, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
+ 0x90, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x9A, 0x20, 0x9C, 0x9D, 0x9E, 0x9F,
+ 0x20, 0xA2, 0xA2, 0xBC, 0x20, 0xB4, 0x20, 0x20,
+ 0xB8, 0x20, 0xBA, 0x20, 0x20, 0x20, 0x20, 0xBF,
+ 0x20, 0x20, 0xB3, 0xB3, 0xB4, 0xB5, 0x20, 0x20,
+ 0xB8, 0x20, 0xBA, 0x20, 0xBC, 0xBE, 0xBE, 0xBF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
+}
+
+var ngrams_windows_1251 = [64]uint32{
+ 0x20E220, 0x20E2EE, 0x20E4EE, 0x20E7E0, 0x20E820, 0x20EAE0, 0x20EAEE, 0x20EDE0, 0x20EDE5, 0x20EEE1, 0x20EFEE, 0x20EFF0, 0x20F0E0, 0x20F1EE, 0x20F1F2, 0x20F2EE,
+ 0x20F7F2, 0x20FDF2, 0xE0EDE8, 0xE0F2FC, 0xE3EE20, 0xE5EBFC, 0xE5EDE8, 0xE5F1F2, 0xE5F220, 0xE820EF, 0xE8E520, 0xE8E820, 0xE8FF20, 0xEBE5ED, 0xEBE820, 0xEBFCED,
+ 0xEDE020, 0xEDE520, 0xEDE8E5, 0xEDE8FF, 0xEDEE20, 0xEDEEE2, 0xEE20E2, 0xEE20EF, 0xEE20F1, 0xEEE220, 0xEEE2E0, 0xEEE3EE, 0xEEE920, 0xEEEBFC, 0xEEEC20, 0xEEF1F2,
+ 0xEFEEEB, 0xEFF0E5, 0xEFF0E8, 0xEFF0EE, 0xF0E0E2, 0xF0E5E4, 0xF1F2E0, 0xF1F2E2, 0xF1F2E8, 0xF1FF20, 0xF2E5EB, 0xF2EE20, 0xF2EEF0, 0xF2FC20, 0xF7F2EE, 0xFBF520,
+}
+
+// newRecognizer_windows_1251 builds the recognizer for Cyrillic text in the
+// windows-1251 code page.
+// Fix: the language tag was "ar", an apparent copy/paste from the
+// windows-1256 (Arabic) constructor above. The char map here folds the
+// windows-1251 Cyrillic letters and ngrams_windows_1251 mirrors the Russian
+// ISO-8859-5 table (ngrams_8859_5_ru) transposed to windows-1251 code
+// points, so the language is Russian.
+func newRecognizer_windows_1251() *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  "windows-1251",
+		language: "ru",
+		charMap:  &charMap_windows_1251,
+		ngram:    &ngrams_windows_1251,
+	}
+}
+
+var charMap_KOI8_R = [256]byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xA3, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+}
+
+var ngrams_KOI8_R = [64]uint32{
+ 0x20C4CF, 0x20C920, 0x20CBC1, 0x20CBCF, 0x20CEC1, 0x20CEC5, 0x20CFC2, 0x20D0CF, 0x20D0D2, 0x20D2C1, 0x20D3CF, 0x20D3D4, 0x20D4CF, 0x20D720, 0x20D7CF, 0x20DAC1,
+ 0x20DCD4, 0x20DED4, 0xC1CEC9, 0xC1D4D8, 0xC5CCD8, 0xC5CEC9, 0xC5D3D4, 0xC5D420, 0xC7CF20, 0xC920D0, 0xC9C520, 0xC9C920, 0xC9D120, 0xCCC5CE, 0xCCC920, 0xCCD8CE,
+ 0xCEC120, 0xCEC520, 0xCEC9C5, 0xCEC9D1, 0xCECF20, 0xCECFD7, 0xCF20D0, 0xCF20D3, 0xCF20D7, 0xCFC7CF, 0xCFCA20, 0xCFCCD8, 0xCFCD20, 0xCFD3D4, 0xCFD720, 0xCFD7C1,
+ 0xD0CFCC, 0xD0D2C5, 0xD0D2C9, 0xD0D2CF, 0xD2C1D7, 0xD2C5C4, 0xD3D120, 0xD3D4C1, 0xD3D4C9, 0xD3D4D7, 0xD4C5CC, 0xD4CF20, 0xD4CFD2, 0xD4D820, 0xD9C820, 0xDED4CF,
+}
+
+// newRecognizer_KOI8_R builds the recognizer for Russian text in the
+// KOI8-R encoding.
+func newRecognizer_KOI8_R() *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  "KOI8-R",
+		language: "ru",
+		charMap:  &charMap_KOI8_R,
+		ngram:    &ngrams_KOI8_R,
+	}
+}
+
+var charMap_IBM424_he = [256]byte{
+ /* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
+ /* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 4- */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 5- */ 0x40, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 7- */ 0x40, 0x71, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x40, 0x40,
+ /* 8- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 9- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* B- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+}
+
+var ngrams_IBM424_he_rtl = [64]uint32{
+ 0x404146, 0x404148, 0x404151, 0x404171, 0x404251, 0x404256, 0x404541, 0x404546, 0x404551, 0x404556, 0x404562, 0x404569, 0x404571, 0x405441, 0x405445, 0x405641,
+ 0x406254, 0x406954, 0x417140, 0x454041, 0x454042, 0x454045, 0x454054, 0x454056, 0x454069, 0x454641, 0x464140, 0x465540, 0x465740, 0x466840, 0x467140, 0x514045,
+ 0x514540, 0x514671, 0x515155, 0x515540, 0x515740, 0x516840, 0x517140, 0x544041, 0x544045, 0x544140, 0x544540, 0x554041, 0x554042, 0x554045, 0x554054, 0x554056,
+ 0x554069, 0x564540, 0x574045, 0x584540, 0x585140, 0x585155, 0x625440, 0x684045, 0x685155, 0x695440, 0x714041, 0x714042, 0x714045, 0x714054, 0x714056, 0x714069,
+}
+
+var ngrams_IBM424_he_ltr = [64]uint32{
+ 0x404146, 0x404154, 0x404551, 0x404554, 0x404556, 0x404558, 0x405158, 0x405462, 0x405469, 0x405546, 0x405551, 0x405746, 0x405751, 0x406846, 0x406851, 0x407141,
+ 0x407146, 0x407151, 0x414045, 0x414054, 0x414055, 0x414071, 0x414540, 0x414645, 0x415440, 0x415640, 0x424045, 0x424055, 0x424071, 0x454045, 0x454051, 0x454054,
+ 0x454055, 0x454057, 0x454068, 0x454071, 0x455440, 0x464140, 0x464540, 0x484140, 0x514140, 0x514240, 0x514540, 0x544045, 0x544055, 0x544071, 0x546240, 0x546940,
+ 0x555151, 0x555158, 0x555168, 0x564045, 0x564055, 0x564071, 0x564240, 0x564540, 0x624540, 0x694045, 0x694055, 0x694071, 0x694540, 0x714140, 0x714540, 0x714651,
+}
+
+// newRecognizer_IBM424_he builds a recognizer for Hebrew text in the IBM424
+// EBCDIC code page. The caller supplies the charset name because the same
+// char map serves two variants distinguished only by their n-gram tables.
+func newRecognizer_IBM424_he(charset string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  charset,
+		language: "he",
+		charMap:  &charMap_IBM424_he,
+		ngram:    ngram,
+	}
+}
+
+// newRecognizer_IBM424_he_rtl builds the right-to-left IBM424 Hebrew variant.
+func newRecognizer_IBM424_he_rtl() *recognizerSingleByte {
+	return newRecognizer_IBM424_he("IBM424_rtl", &ngrams_IBM424_he_rtl)
+}
+
+// newRecognizer_IBM424_he_ltr builds the left-to-right IBM424 Hebrew variant.
+func newRecognizer_IBM424_he_ltr() *recognizerSingleByte {
+	return newRecognizer_IBM424_he("IBM424_ltr", &ngrams_IBM424_he_ltr)
+}
+
+var charMap_IBM420_ar = [256]byte{
+ /* -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -A -B -C -D -E -F */
+ /* 0- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 1- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 2- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 3- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 4- */ 0x40, 0x40, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 5- */ 0x40, 0x51, 0x52, 0x40, 0x40, 0x55, 0x56, 0x57, 0x58, 0x59, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 6- */ 0x40, 0x40, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 7- */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ /* 8- */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ /* 9- */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
+ /* A- */ 0xA0, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ /* B- */ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0x40, 0x40, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ /* C- */ 0x40, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x40, 0xCB, 0x40, 0xCD, 0x40, 0xCF,
+ /* D- */ 0x40, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ /* E- */ 0x40, 0x40, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xEA, 0xEB, 0x40, 0xED, 0xEE, 0xEF,
+ /* F- */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xFB, 0xFC, 0xFD, 0xFE, 0x40,
+}
+
+var ngrams_IBM420_ar_rtl = [64]uint32{
+ 0x4056B1, 0x4056BD, 0x405856, 0x409AB1, 0x40ABDC, 0x40B1B1, 0x40BBBD, 0x40CF56, 0x564056, 0x564640, 0x566340, 0x567540, 0x56B140, 0x56B149, 0x56B156, 0x56B158,
+ 0x56B163, 0x56B167, 0x56B169, 0x56B173, 0x56B178, 0x56B19A, 0x56B1AD, 0x56B1BB, 0x56B1CF, 0x56B1DC, 0x56BB40, 0x56BD40, 0x56BD63, 0x584056, 0x624056, 0x6240AB,
+ 0x6240B1, 0x6240BB, 0x6240CF, 0x634056, 0x734056, 0x736240, 0x754056, 0x756240, 0x784056, 0x9A4056, 0x9AB1DA, 0xABDC40, 0xB14056, 0xB16240, 0xB1DA40, 0xB1DC40,
+ 0xBB4056, 0xBB5640, 0xBB6240, 0xBBBD40, 0xBD4056, 0xBF4056, 0xBF5640, 0xCF56B1, 0xCFBD40, 0xDA4056, 0xDC4056, 0xDC40BB, 0xDC40CF, 0xDC6240, 0xDC7540, 0xDCBD40,
+}
+
+var ngrams_IBM420_ar_ltr = [64]uint32{
+ 0x404656, 0x4056BB, 0x4056BF, 0x406273, 0x406275, 0x4062B1, 0x4062BB, 0x4062DC, 0x406356, 0x407556, 0x4075DC, 0x40B156, 0x40BB56, 0x40BD56, 0x40BDBB, 0x40BDCF,
+ 0x40BDDC, 0x40DAB1, 0x40DCAB, 0x40DCB1, 0x49B156, 0x564056, 0x564058, 0x564062, 0x564063, 0x564073, 0x564075, 0x564078, 0x56409A, 0x5640B1, 0x5640BB, 0x5640BD,
+ 0x5640BF, 0x5640DA, 0x5640DC, 0x565840, 0x56B156, 0x56CF40, 0x58B156, 0x63B156, 0x63BD56, 0x67B156, 0x69B156, 0x73B156, 0x78B156, 0x9AB156, 0xAB4062, 0xADB156,
+ 0xB14062, 0xB15640, 0xB156CF, 0xB19A40, 0xB1B140, 0xBB4062, 0xBB40DC, 0xBBB156, 0xBD5640, 0xBDBB40, 0xCF4062, 0xCF40DC, 0xCFB156, 0xDAB19A, 0xDCAB40, 0xDCB156,
+}
+
+// newRecognizer_IBM420_ar builds a recognizer for Arabic text in the IBM420
+// EBCDIC code page. The caller supplies the charset name because the same
+// char map serves two variants distinguished only by their n-gram tables.
+func newRecognizer_IBM420_ar(charset string, ngram *[64]uint32) *recognizerSingleByte {
+	return &recognizerSingleByte{
+		charset:  charset,
+		language: "ar",
+		charMap:  &charMap_IBM420_ar,
+		ngram:    ngram,
+	}
+}
+
+// newRecognizer_IBM420_ar_rtl builds the right-to-left IBM420 Arabic variant.
+func newRecognizer_IBM420_ar_rtl() *recognizerSingleByte {
+	return newRecognizer_IBM420_ar("IBM420_rtl", &ngrams_IBM420_ar_rtl)
+}
+
+// newRecognizer_IBM420_ar_ltr builds the left-to-right IBM420 Arabic variant.
+func newRecognizer_IBM420_ar_ltr() *recognizerSingleByte {
+	return newRecognizer_IBM420_ar("IBM420_ltr", &ngrams_IBM420_ar_ltr)
+}
diff --git a/vendor/github.com/saintfish/chardet/unicode.go b/vendor/github.com/saintfish/chardet/unicode.go
new file mode 100644
index 000000000..6f9fa9e67
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/unicode.go
@@ -0,0 +1,103 @@
+package chardet
+
+import (
+ "bytes"
+)
+
+var (
+ utf16beBom = []byte{0xFE, 0xFF}
+ utf16leBom = []byte{0xFF, 0xFE}
+ utf32beBom = []byte{0x00, 0x00, 0xFE, 0xFF}
+ utf32leBom = []byte{0xFF, 0xFE, 0x00, 0x00}
+)
+
+type recognizerUtf16be struct {
+}
+
+func newRecognizer_utf16be() *recognizerUtf16be {
+ return &recognizerUtf16be{}
+}
+
+func (*recognizerUtf16be) Match(input *recognizerInput) (output recognizerOutput) {
+ output = recognizerOutput{
+ Charset: "UTF-16BE",
+ }
+ if bytes.HasPrefix(input.raw, utf16beBom) {
+ output.Confidence = 100
+ }
+ return
+}
+
+type recognizerUtf16le struct {
+}
+
+func newRecognizer_utf16le() *recognizerUtf16le {
+ return &recognizerUtf16le{}
+}
+
+func (*recognizerUtf16le) Match(input *recognizerInput) (output recognizerOutput) {
+ output = recognizerOutput{
+ Charset: "UTF-16LE",
+ }
+ if bytes.HasPrefix(input.raw, utf16leBom) && !bytes.HasPrefix(input.raw, utf32leBom) {
+ output.Confidence = 100
+ }
+ return
+}
+
+type recognizerUtf32 struct {
+ name string
+ bom []byte
+ decodeChar func(input []byte) uint32
+}
+
+func decodeUtf32be(input []byte) uint32 {
+ return uint32(input[0])<<24 | uint32(input[1])<<16 | uint32(input[2])<<8 | uint32(input[3])
+}
+
+func decodeUtf32le(input []byte) uint32 {
+ return uint32(input[3])<<24 | uint32(input[2])<<16 | uint32(input[1])<<8 | uint32(input[0])
+}
+
+func newRecognizer_utf32be() *recognizerUtf32 {
+ return &recognizerUtf32{
+ "UTF-32BE",
+ utf32beBom,
+ decodeUtf32be,
+ }
+}
+
+func newRecognizer_utf32le() *recognizerUtf32 {
+ return &recognizerUtf32{
+ "UTF-32LE",
+ utf32leBom,
+ decodeUtf32le,
+ }
+}
+
+func (r *recognizerUtf32) Match(input *recognizerInput) (output recognizerOutput) {
+ output = recognizerOutput{
+ Charset: r.name,
+ }
+ hasBom := bytes.HasPrefix(input.raw, r.bom)
+ var numValid, numInvalid uint32
+ for b := input.raw; len(b) >= 4; b = b[4:] {
+ if c := r.decodeChar(b); c >= 0x10FFFF || (c >= 0xD800 && c <= 0xDFFF) {
+ numInvalid++
+ } else {
+ numValid++
+ }
+ }
+ if hasBom && numInvalid == 0 {
+ output.Confidence = 100
+ } else if hasBom && numValid > numInvalid*10 {
+ output.Confidence = 80
+ } else if numValid > 3 && numInvalid == 0 {
+ output.Confidence = 100
+ } else if numValid > 0 && numInvalid == 0 {
+ output.Confidence = 80
+ } else if numValid > numInvalid*10 {
+ output.Confidence = 25
+ }
+ return
+}
diff --git a/vendor/github.com/saintfish/chardet/utf8.go b/vendor/github.com/saintfish/chardet/utf8.go
new file mode 100644
index 000000000..ae036ad9b
--- /dev/null
+++ b/vendor/github.com/saintfish/chardet/utf8.go
@@ -0,0 +1,71 @@
+package chardet
+
+import (
+ "bytes"
+)
+
+var utf8Bom = []byte{0xEF, 0xBB, 0xBF}
+
+type recognizerUtf8 struct {
+}
+
+func newRecognizer_utf8() *recognizerUtf8 {
+ return &recognizerUtf8{}
+}
+
+func (*recognizerUtf8) Match(input *recognizerInput) (output recognizerOutput) {
+ output = recognizerOutput{
+ Charset: "UTF-8",
+ }
+ hasBom := bytes.HasPrefix(input.raw, utf8Bom)
+ inputLen := len(input.raw)
+ var numValid, numInvalid uint32
+ var trailBytes uint8
+ for i := 0; i < inputLen; i++ {
+ c := input.raw[i]
+ if c&0x80 == 0 {
+ continue
+ }
+ if c&0xE0 == 0xC0 {
+ trailBytes = 1
+ } else if c&0xF0 == 0xE0 {
+ trailBytes = 2
+ } else if c&0xF8 == 0xF0 {
+ trailBytes = 3
+ } else {
+ numInvalid++
+ if numInvalid > 5 {
+ break
+ }
+ trailBytes = 0
+ }
+
+ for i++; i < inputLen; i++ {
+ c = input.raw[i]
+ if c&0xC0 != 0x80 {
+ numInvalid++
+ break
+ }
+ if trailBytes--; trailBytes == 0 {
+ numValid++
+ break
+ }
+ }
+ }
+
+ if hasBom && numInvalid == 0 {
+ output.Confidence = 100
+ } else if hasBom && numValid > numInvalid*10 {
+ output.Confidence = 80
+ } else if numValid > 3 && numInvalid == 0 {
+ output.Confidence = 100
+ } else if numValid > 0 && numInvalid == 0 {
+ output.Confidence = 80
+ } else if numValid == 0 && numInvalid == 0 {
+ // Plain ASCII
+ output.Confidence = 10
+ } else if numValid > numInvalid*10 {
+ output.Confidence = 25
+ }
+ return
+}
diff --git a/vendor/github.com/temoto/robotstxt/.gitignore b/vendor/github.com/temoto/robotstxt/.gitignore
new file mode 100644
index 000000000..2ef152f63
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/.gitignore
@@ -0,0 +1,9 @@
+*.cgo?.*
+*.o
+*.so
+*.sublime-*
+.DS_Store
+_cgo_*
+_obj
+_test
+coverage.txt
diff --git a/vendor/github.com/temoto/robotstxt/.travis.yml b/vendor/github.com/temoto/robotstxt/.travis.yml
new file mode 100644
index 000000000..94d72beaa
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/.travis.yml
@@ -0,0 +1,30 @@
+language: go
+sudo: false
+
+cache:
+ go: true
+ directories:
+ - "$HOME/.cache"
+go:
+- master
+- 1.10.x
+- 1.9
+- 1.8
+- 1.7
+- 1.6
+
+matrix:
+ include:
+ - go: 1.10.x
+ env: task=bench
+ - go: 1.7
+ env: task=bench
+ - go: master
+ env: task=bench
+ - go: master
+ env: task=clean
+
+install:
+ - go get -u github.com/alecthomas/gometalinter
+script: ./script/${task:-test}
+after_success: if [[ -z "$task" ]] ; then bash <(curl -s https://codecov.io/bash) ; fi
diff --git a/vendor/github.com/temoto/robotstxt/LICENSE b/vendor/github.com/temoto/robotstxt/LICENSE
new file mode 100644
index 000000000..c125145b6
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2010 Sergey Shepelev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/temoto/robotstxt/README.rst b/vendor/github.com/temoto/robotstxt/README.rst
new file mode 100644
index 000000000..2f181810c
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/README.rst
@@ -0,0 +1,112 @@
+What
+====
+
+This is a robots.txt exclusion protocol implementation for Go language (golang).
+
+
+Build
+=====
+
+To build and run tests run `script/test` in source directory.
+
+
+Contribute
+==========
+
+Warm welcome.
+
+* If desired, add your name in README.rst, section Who.
+* Run `script/test && script/clean && echo ok`
+* You can ignore linter warnings, but everything else must pass.
+* Send your change as pull request or just a regular patch to current maintainer (see section Who).
+
+Thank you.
+
+
+Usage
+=====
+
+As usual, no special installation is required, just
+
+ import "github.com/temoto/robotstxt"
+
+run `go get` and you're ready.
+
+1. Parse
+^^^^^^^^
+
+First of all, you need to parse robots.txt data. You can do it with
+functions `FromBytes(body []byte) (*RobotsData, error)` or same for `string`::
+
+ robots, err := robotstxt.FromBytes([]byte("User-agent: *\nDisallow:"))
+ robots, err := robotstxt.FromString("User-agent: *\nDisallow:")
+
+As of 2012-10-03, `FromBytes` is the most efficient method, everything else
+is a wrapper for this core function.
+
+There are a few convenient constructors for various purposes:
+
+* `FromResponse(*http.Response) (*RobotsData, error)` to init robots data
+from HTTP response. It *does not* call `response.Body.Close()`::
+
+ robots, err := robotstxt.FromResponse(resp)
+ resp.Body.Close()
+ if err != nil {
+ log.Println("Error parsing robots.txt:", err.Error())
+ }
+
+* `FromStatusAndBytes(statusCode int, body []byte) (*RobotsData, error)` or
+`FromStatusAndString` if you prefer to read bytes (string) yourself.
+Passing status code applies following logic in line with Google's interpretation
+of robots.txt files:
+
+ * status 2xx -> parse body with `FromBytes` and apply rules listed there.
+ * status 4xx -> allow all (even 401/403, as recommended by Google).
+ * other (5xx) -> disallow all, consider this a temporary unavailability.
+
+2. Query
+^^^^^^^^
+
+Parsing robots.txt content builds a kind of logic database, which you can
+query with `(r *RobotsData) TestAgent(url, agent string) (bool)`.
+
+Explicit passing of agent is useful if you want to query for different agents. For
+single agent users there is an efficient option: `RobotsData.FindGroup(userAgent string)`
+returns a structure with `.Test(path string)` method and `.CrawlDelay time.Duration`.
+
+Simple query with explicit user agent. Each call will scan all rules.
+
+::
+
+ allow := robots.TestAgent("/", "FooBot")
+
+Or query several paths against same user agent for performance.
+
+::
+
+ group := robots.FindGroup("BarBot")
+ group.Test("/")
+ group.Test("/download.mp3")
+ group.Test("/news/article-2012-1")
+
+
+Who
+===
+
+Honorable contributors (in undefined order):
+
+ * Ilya Grigorik (igrigorik)
+ * Martin Angers (PuerkitoBio)
+ * Micha Gorelick (mynameisfiber)
+
+Initial commit and other: Sergey Shepelev temotor@gmail.com
+
+
+Flair
+=====
+
+.. image:: https://travis-ci.org/temoto/robotstxt.svg?branch=master
+ :target: https://travis-ci.org/temoto/robotstxt
+
+.. image:: https://codecov.io/gh/temoto/robotstxt/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/temoto/robotstxt
diff --git a/vendor/github.com/temoto/robotstxt/codecov.yml b/vendor/github.com/temoto/robotstxt/codecov.yml
new file mode 100644
index 000000000..b80be28f6
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/codecov.yml
@@ -0,0 +1,2 @@
+codecov:
+ token: 6bf9c7eb-69ff-4b74-8464-e2fb452d0f04
diff --git a/vendor/github.com/temoto/robotstxt/metalinter.json b/vendor/github.com/temoto/robotstxt/metalinter.json
new file mode 100644
index 000000000..0cfc7a064
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/metalinter.json
@@ -0,0 +1,9 @@
+{
+ "Deadline": "60s",
+ "Exclude": [
+ "should have comment or be unexported"
+ ],
+ "Tests": true,
+ "Vendor": true,
+ "VendoredLinters": true
+}
diff --git a/vendor/github.com/temoto/robotstxt/parser.go b/vendor/github.com/temoto/robotstxt/parser.go
new file mode 100644
index 000000000..0ce2acc63
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/parser.go
@@ -0,0 +1,266 @@
+package robotstxt
+
+// Comments explaining the logic are taken from either the google's spec:
+// https://developers.google.com/webmasters/control-crawl-index/docs/robots_txt
+//
+// or the Wikipedia's entry on robots.txt:
+// http://en.wikipedia.org/wiki/Robots.txt
+
+import (
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type lineType uint
+
+const (
+ lIgnore lineType = iota
+ lUnknown
+ lUserAgent
+ lAllow
+ lDisallow
+ lCrawlDelay
+ lSitemap
+ lHost
+)
+
+type parser struct {
+ tokens []string
+ pos int
+}
+
+type lineInfo struct {
+ t lineType // Type of line key
+ k string // String representation of the type of key
+ vs string // String value of the key
+ vf float64 // Float value of the key
+ vr *regexp.Regexp // Regexp value of the key
+}
+
+func newParser(tokens []string) *parser {
+ return &parser{tokens: tokens}
+}
+
+func parseGroupMap(groups map[string]*Group, agents []string, fun func(*Group)) {
+ var g *Group
+ for _, a := range agents {
+ if g = groups[a]; g == nil {
+ g = new(Group)
+ groups[a] = g
+ }
+ fun(g)
+ }
+}
+
+func (p *parser) parseAll() (groups map[string]*Group, host string, sitemaps []string, errs []error) {
+ groups = make(map[string]*Group, 16)
+ agents := make([]string, 0, 4)
+ isEmptyGroup := true
+
+ // Reset internal fields, tokens are assigned at creation time, never change
+ p.pos = 0
+
+ for {
+ if li, err := p.parseLine(); err != nil {
+ if err == io.EOF {
+ break
+ }
+ errs = append(errs, err)
+ } else {
+ switch li.t {
+ case lUserAgent:
+ // Two successive user-agent lines are part of the same group.
+ if !isEmptyGroup {
+ // End previous group
+ agents = make([]string, 0, 4)
+ }
+ if len(agents) == 0 {
+ isEmptyGroup = true
+ }
+ agents = append(agents, li.vs)
+
+ case lDisallow:
+ // Error if no current group
+ if len(agents) == 0 {
+ errs = append(errs, fmt.Errorf("Disallow before User-agent at token #%d.", p.pos))
+ } else {
+ isEmptyGroup = false
+ var r *rule
+ if li.vr != nil {
+ r = &rule{"", false, li.vr}
+ } else {
+ r = &rule{li.vs, false, nil}
+ }
+ parseGroupMap(groups, agents, func(g *Group) { g.rules = append(g.rules, r) })
+ }
+
+ case lAllow:
+ // Error if no current group
+ if len(agents) == 0 {
+ errs = append(errs, fmt.Errorf("Allow before User-agent at token #%d.", p.pos))
+ } else {
+ isEmptyGroup = false
+ var r *rule
+ if li.vr != nil {
+ r = &rule{"", true, li.vr}
+ } else {
+ r = &rule{li.vs, true, nil}
+ }
+ parseGroupMap(groups, agents, func(g *Group) { g.rules = append(g.rules, r) })
+ }
+
+ case lHost:
+ host = li.vs
+
+ case lSitemap:
+ sitemaps = append(sitemaps, li.vs)
+
+ case lCrawlDelay:
+ if len(agents) == 0 {
+ errs = append(errs, fmt.Errorf("Crawl-delay before User-agent at token #%d.", p.pos))
+ } else {
+ isEmptyGroup = false
+ delay := time.Duration(li.vf * float64(time.Second))
+ parseGroupMap(groups, agents, func(g *Group) { g.CrawlDelay = delay })
+ }
+ }
+ }
+ }
+ return
+}
+
+func (p *parser) parseLine() (li *lineInfo, err error) {
+ t1, ok1 := p.popToken()
+ if !ok1 {
+ // proper EOF
+ return nil, io.EOF
+ }
+
+ t2, ok2 := p.peekToken()
+ if !ok2 {
+ // EOF, no value associated with the token, so ignore token and return
+ return nil, io.EOF
+ }
+
+ // Helper closure for all string-based tokens, common behaviour:
+ // - Consume t2 token
+	// - If empty, return unknown line info
+ // - Otherwise return the specified line info
+ returnStringVal := func(t lineType) (*lineInfo, error) {
+ p.popToken()
+ if t2 != "" {
+ return &lineInfo{t: t, k: t1, vs: t2}, nil
+ }
+ return &lineInfo{t: lIgnore}, nil
+ }
+
+ // Helper closure for all path tokens (allow/disallow), common behaviour:
+ // - Consume t2 token
+	// - If empty, return unknown line info
+ // - Otherwise, normalize the path (add leading "/" if missing, remove trailing "*")
+ // - Detect if wildcards are present, if so, compile into a regexp
+ // - Return the specified line info
+ returnPathVal := func(t lineType) (*lineInfo, error) {
+ p.popToken()
+ if t2 != "" {
+ if !strings.HasPrefix(t2, "*") && !strings.HasPrefix(t2, "/") {
+ t2 = "/" + t2
+ }
+ if strings.HasSuffix(t2, "*") {
+ t2 = strings.TrimRight(t2, "*")
+ }
+ // From google's spec:
+ // Google, Bing, Yahoo, and Ask support a limited form of
+ // "wildcards" for path values. These are:
+ // * designates 0 or more instances of any valid character
+ // $ designates the end of the URL
+ if strings.ContainsAny(t2, "*$") {
+ // Must compile a regexp, this is a pattern.
+ // Escape string before compile.
+ t2 = regexp.QuoteMeta(t2)
+ t2 = strings.Replace(t2, `\*`, `.*`, -1)
+ t2 = strings.Replace(t2, `\$`, `$`, -1)
+ if r, e := regexp.Compile(t2); e != nil {
+ return nil, e
+ } else {
+ return &lineInfo{t: t, k: t1, vr: r}, nil
+ }
+ } else {
+ // Simple string path
+ return &lineInfo{t: t, k: t1, vs: t2}, nil
+ }
+ }
+ return &lineInfo{t: lIgnore}, nil
+ }
+
+ switch strings.ToLower(t1) {
+ case "\n":
+ // Don't consume t2 and continue parsing
+ return &lineInfo{t: lIgnore}, nil
+
+ case "user-agent", "useragent":
+ // From google's spec:
+ // Handling of elements with simple errors / typos (eg "useragent"
+ // instead of "user-agent") is undefined and may be interpreted as correct
+ // directives by some user-agents.
+ // The user-agent is non-case-sensitive.
+ t2 = strings.ToLower(t2)
+ return returnStringVal(lUserAgent)
+
+ case "disallow":
+ // From google's spec:
+ // When no path is specified, the directive is ignored (so an empty Disallow
+ // CAN be an allow, since allow is the default. The actual result depends
+ // on the other rules in the group).
+ return returnPathVal(lDisallow)
+
+ case "allow":
+ // From google's spec:
+ // When no path is specified, the directive is ignored.
+ return returnPathVal(lAllow)
+
+ case "host":
+ // Host directive to specify main site mirror
+ // Read more: https://help.yandex.com/webmaster/controlling-robot/robots-txt.xml#host
+ return returnStringVal(lHost)
+
+ case "sitemap":
+ // Non-group field, applies to the host as a whole, not to a specific user-agent
+ return returnStringVal(lSitemap)
+
+ case "crawl-delay", "crawldelay":
+ // From http://en.wikipedia.org/wiki/Robots_exclusion_standard#Nonstandard_extensions
+ // Several major crawlers support a Crawl-delay parameter, set to the
+ // number of seconds to wait between successive requests to the same server.
+ p.popToken()
+ if cd, e := strconv.ParseFloat(t2, 64); e != nil {
+ return nil, e
+ } else {
+ return &lineInfo{t: lCrawlDelay, k: t1, vf: cd}, nil
+ }
+ }
+
+ // Consume t2 token
+ p.popToken()
+ return &lineInfo{t: lUnknown, k: t1}, nil
+}
+
+func (p *parser) popToken() (tok string, ok bool) {
+ tok, ok = p.peekToken()
+ if !ok {
+ return
+ }
+ p.pos++
+ return tok, true
+}
+
+func (p *parser) peekToken() (tok string, ok bool) {
+ if p.pos >= len(p.tokens) {
+ return "", false
+ }
+ return p.tokens[p.pos], true
+}
diff --git a/vendor/github.com/temoto/robotstxt/robotstxt.go b/vendor/github.com/temoto/robotstxt/robotstxt.go
new file mode 100644
index 000000000..9dfcc2552
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/robotstxt.go
@@ -0,0 +1,231 @@
+// Package robotstxt implements the robots.txt Exclusion Protocol
+// as specified in http://www.robotstxt.org/wc/robots.html
+// with various extensions.
+package robotstxt
+
+// Comments explaining the logic are taken from either the Google's spec:
+// https://developers.google.com/webmasters/control-crawl-index/docs/robots_txt
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type RobotsData struct {
+ // private
+ groups map[string]*Group
+ allowAll bool
+ disallowAll bool
+ Host string
+ Sitemaps []string
+}
+
+type Group struct {
+ rules []*rule
+ Agent string
+ CrawlDelay time.Duration
+}
+
+type rule struct {
+ path string
+ allow bool
+ pattern *regexp.Regexp
+}
+
+type ParseError struct {
+ Errs []error
+}
+
+func newParseError(errs []error) *ParseError {
+ return &ParseError{errs}
+}
+
+func (e ParseError) Error() string {
+ var b bytes.Buffer
+
+ b.WriteString("Parse error(s): " + "\n")
+ for _, er := range e.Errs {
+ b.WriteString(er.Error() + "\n")
+ }
+ return b.String()
+}
+
+var allowAll = &RobotsData{allowAll: true}
+var disallowAll = &RobotsData{disallowAll: true}
+var emptyGroup = &Group{}
+
+func FromStatusAndBytes(statusCode int, body []byte) (*RobotsData, error) {
+ switch {
+ case statusCode >= 200 && statusCode < 300:
+ return FromBytes(body)
+
+ // From https://developers.google.com/webmasters/control-crawl-index/docs/robots_txt
+ //
+ // Google treats all 4xx errors in the same way and assumes that no valid
+ // robots.txt file exists. It is assumed that there are no restrictions.
+ // This is a "full allow" for crawling. Note: this includes 401
+ // "Unauthorized" and 403 "Forbidden" HTTP result codes.
+ case statusCode >= 400 && statusCode < 500:
+ return allowAll, nil
+
+ // From Google's spec:
+ // Server errors (5xx) are seen as temporary errors that result in a "full
+ // disallow" of crawling.
+ case statusCode >= 500 && statusCode < 600:
+ return disallowAll, nil
+ }
+
+ return nil, errors.New("Unexpected status: " + strconv.Itoa(statusCode))
+}
+
+func FromStatusAndString(statusCode int, body string) (*RobotsData, error) {
+ return FromStatusAndBytes(statusCode, []byte(body))
+}
+
+func FromResponse(res *http.Response) (*RobotsData, error) {
+ if res == nil {
+ // Edge case, if res is nil, return nil data
+ return nil, nil
+ }
+ buf, e := ioutil.ReadAll(res.Body)
+ if e != nil {
+ return nil, e
+ }
+ return FromStatusAndBytes(res.StatusCode, buf)
+}
+
+func FromBytes(body []byte) (r *RobotsData, err error) {
+ var errs []error
+
+ // special case (probably not worth optimization?)
+ trimmed := bytes.TrimSpace(body)
+ if len(trimmed) == 0 {
+ return allowAll, nil
+ }
+
+ sc := newByteScanner("bytes", true)
+ //sc.Quiet = !print_errors
+ sc.Feed(body, true)
+ var tokens []string
+ tokens, err = sc.ScanAll()
+ if err != nil {
+ return nil, err
+ }
+
+ // special case worth optimization
+ if len(tokens) == 0 {
+ return allowAll, nil
+ }
+
+ r = &RobotsData{}
+ parser := newParser(tokens)
+ r.groups, r.Host, r.Sitemaps, errs = parser.parseAll()
+ if len(errs) > 0 {
+ return nil, newParseError(errs)
+ }
+
+ return r, nil
+}
+
+func FromString(body string) (r *RobotsData, err error) {
+ return FromBytes([]byte(body))
+}
+
+func (r *RobotsData) TestAgent(path, agent string) bool {
+ if r.allowAll {
+ return true
+ }
+ if r.disallowAll {
+ return false
+ }
+
+ // Find a group of rules that applies to this agent
+ // From Google's spec:
+ // The user-agent is non-case-sensitive.
+ g := r.FindGroup(agent)
+ return g.Test(path)
+}
+
+// FindGroup searches block of declarations for specified user-agent.
+// From Google's spec:
+// Only one group of group-member records is valid for a particular crawler.
+// The crawler must determine the correct group of records by finding the group
+// with the most specific user-agent that still matches. All other groups of
+// records are ignored by the crawler. The user-agent is non-case-sensitive.
+// The order of the groups within the robots.txt file is irrelevant.
+func (r *RobotsData) FindGroup(agent string) (ret *Group) {
+ var prefixLen int
+
+ agent = strings.ToLower(agent)
+ if ret = r.groups["*"]; ret != nil {
+ // Weakest match possible
+ prefixLen = 1
+ }
+ for a, g := range r.groups {
+ if a != "*" && strings.HasPrefix(agent, a) {
+ if l := len(a); l > prefixLen {
+ prefixLen = l
+ ret = g
+ }
+ }
+ }
+
+ if ret == nil {
+ return emptyGroup
+ }
+ return
+}
+
+func (g *Group) Test(path string) bool {
+ if r := g.findRule(path); r != nil {
+ return r.allow
+ }
+
+ // From Google's spec:
+ // By default, there are no restrictions for crawling for the designated crawlers.
+ return true
+}
+
+// From Google's spec:
+// The path value is used as a basis to determine whether or not a rule applies
+// to a specific URL on a site. With the exception of wildcards, the path is
+// used to match the beginning of a URL (and any valid URLs that start with the
+// same path).
+//
+// At a group-member level, in particular for allow and disallow directives,
+// the most specific rule based on the length of the [path] entry will trump
+// the less specific (shorter) rule. The order of precedence for rules with
+// wildcards is undefined.
+func (g *Group) findRule(path string) (ret *rule) {
+ var prefixLen int
+
+ for _, r := range g.rules {
+ if r.pattern != nil {
+ if r.pattern.MatchString(path) {
+ // Consider this a match equal to the length of the pattern.
+ // From Google's spec:
+ // The order of precedence for rules with wildcards is undefined.
+ if l := len(r.pattern.String()); l > prefixLen {
+ prefixLen = len(r.pattern.String())
+ ret = r
+ }
+ }
+ } else if r.path == "/" && prefixLen == 0 {
+ // Weakest match possible
+ prefixLen = 1
+ ret = r
+ } else if strings.HasPrefix(path, r.path) {
+ if l := len(r.path); l > prefixLen {
+ prefixLen = l
+ ret = r
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/temoto/robotstxt/scanner.go b/vendor/github.com/temoto/robotstxt/scanner.go
new file mode 100644
index 000000000..157763799
--- /dev/null
+++ b/vendor/github.com/temoto/robotstxt/scanner.go
@@ -0,0 +1,205 @@
+package robotstxt
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "io"
+ "os"
+ "unicode/utf8"
+)
+
+type byteScanner struct {
+ ErrorCount int
+ Quiet bool
+
+ buf []byte
+ pos token.Position
+ lastChunk bool
+ ch rune
+ keyTokenFound bool
+}
+
+var WhitespaceChars = []rune{' ', '\t', '\v'}
+
+func newByteScanner(srcname string, quiet bool) *byteScanner {
+ return &byteScanner{
+ Quiet: quiet,
+ ch: -1,
+ pos: token.Position{Filename: srcname},
+ }
+}
+
+func (s *byteScanner) Feed(input []byte, end bool) error {
+ s.buf = input
+ s.pos.Offset = 0
+ s.pos.Line = 1
+ s.pos.Column = 1
+ s.lastChunk = end
+
+ // Read first char into look-ahead buffer `s.ch`.
+ if err := s.nextChar(); err != nil {
+ return err
+ }
+
+ // Skip UTF-8 byte order mark
+ if s.ch == 65279 {
+ s.nextChar()
+ s.pos.Column = 1
+ }
+
+ return nil
+}
+
+func (s *byteScanner) GetPosition() token.Position {
+ return s.pos
+}
+
+func (s *byteScanner) Scan() (string, error) {
+ //println("--- Scan(). Offset / len(s.buf): ", s.pos.Offset, len(s.buf))
+
+ for {
+ // Note Offset > len, not >=, so we can Scan last character.
+ if s.lastChunk && s.pos.Offset > len(s.buf) {
+ return "", io.EOF
+ }
+
+ s.skipSpace()
+
+ if s.ch == -1 {
+ return "", io.EOF
+ }
+
+ // EOL
+ if s.isEol() {
+ s.keyTokenFound = false
+ // skip subsequent newline chars
+ for s.ch != -1 && s.isEol() {
+ s.nextChar()
+ }
+ // emit newline as separate token
+ return "\n", nil
+ }
+
+ // skip comments
+ if s.ch == '#' {
+ s.keyTokenFound = false
+ s.skipUntilEol()
+ // s.state = "start"
+ if s.ch == -1 {
+ return "", io.EOF
+ }
+ // emit newline as separate token
+ return "\n", nil
+ }
+
+ // else we found something
+ break
+ }
+
+ /*
+ if s.state == "start" {
+ s.state = "key"
+ }
+ */
+
+ var tok bytes.Buffer
+ tok.WriteRune(s.ch)
+ s.nextChar()
+ for s.ch != -1 && !s.isSpace() && !s.isEol() {
+ // Do not consider ":" to be a token separator if a first key token
+ // has already been found on this line (avoid cutting an absolute URL
+ // after the "http:")
+ if s.ch == ':' && !s.keyTokenFound {
+ // s.state = "pre-value"
+ s.nextChar()
+ s.keyTokenFound = true
+ break
+ }
+
+ tok.WriteRune(s.ch)
+ s.nextChar()
+ }
+ return tok.String(), nil
+}
+
+func (s *byteScanner) ScanAll() ([]string, error) {
+ var results []string
+ for {
+ t, err := s.Scan()
+ if t != "" {
+ results = append(results, t)
+ }
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return results, err
+ }
+ }
+ return results, nil
+}
+
+func (s *byteScanner) error(pos token.Position, msg string) {
+ s.ErrorCount++
+ if !s.Quiet {
+ fmt.Fprintf(os.Stderr, "robotstxt from %s: %s\n", pos.String(), msg)
+ }
+}
+
+func (s *byteScanner) isEol() bool {
+ return s.ch == '\n' || s.ch == '\r'
+}
+
+func (s *byteScanner) isSpace() bool {
+ for _, r := range WhitespaceChars {
+ if s.ch == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (s *byteScanner) skipSpace() {
+ //println("--- string(ch): ", s.ch, ".")
+ for s.ch != -1 && s.isSpace() {
+ s.nextChar()
+ }
+}
+
+func (s *byteScanner) skipUntilEol() {
+ //println("--- string(ch): ", s.ch, ".")
+ for s.ch != -1 && !s.isEol() {
+ s.nextChar()
+ }
+ // skip subsequent newline chars
+ for s.ch != -1 && s.isEol() {
+ s.nextChar()
+ }
+}
+
+// Reads next Unicode char.
+func (s *byteScanner) nextChar() error {
+ //println("--- nextChar(). Offset / len(s.buf): ", s.pos.Offset, len(s.buf))
+
+ if s.pos.Offset >= len(s.buf) {
+ s.ch = -1
+ return io.EOF
+ }
+ s.pos.Column++
+ if s.ch == '\n' {
+ s.pos.Line++
+ s.pos.Column = 1
+ }
+ r, w := rune(s.buf[s.pos.Offset]), 1
+ if r >= 0x80 {
+ r, w = utf8.DecodeRune(s.buf[s.pos.Offset:])
+ if r == utf8.RuneError && w == 1 {
+ s.error(s.pos, "illegal UTF-8 encoding")
+ }
+ }
+ s.pos.Column++
+ s.pos.Offset += w
+ s.ch = r
+ return nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0af5d43ae..76fd9fd60 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -49,6 +49,8 @@ github.com/NaverCloudPlatform/ncloud-sdk-go/sdk
github.com/NaverCloudPlatform/ncloud-sdk-go/common
github.com/NaverCloudPlatform/ncloud-sdk-go/request
github.com/NaverCloudPlatform/ncloud-sdk-go/oauth
+# github.com/PuerkitoBio/goquery v1.5.0
+github.com/PuerkitoBio/goquery
# github.com/Telmate/proxmox-api-go v0.0.0-20190410200643-f08824d5082d
github.com/Telmate/proxmox-api-go/proxmox
# github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190418113227-25233c783f4e
@@ -66,6 +68,12 @@ github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints
github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers
# github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f
github.com/aliyun/aliyun-oss-go-sdk/oss
+# github.com/andybalholm/cascadia v1.0.0
+github.com/andybalholm/cascadia
+# github.com/antchfx/htmlquery v1.0.0
+github.com/antchfx/htmlquery
+# github.com/antchfx/xmlquery v1.0.0
+github.com/antchfx/xmlquery
# github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd
github.com/antchfx/xpath
# github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607
@@ -156,12 +164,29 @@ github.com/dustin/go-humanize
github.com/dylanmei/iso8601
# github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
github.com/dylanmei/winrmtest
+# github.com/fatih/camelcase v1.0.0
+github.com/fatih/camelcase
# github.com/fatih/color v1.7.0
github.com/fatih/color
+# github.com/fatih/structtag v1.0.0
+github.com/fatih/structtag
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/go-ini/ini v1.25.4
github.com/go-ini/ini
+# github.com/gobwas/glob v0.2.3
+github.com/gobwas/glob
+github.com/gobwas/glob/compiler
+github.com/gobwas/glob/syntax
+github.com/gobwas/glob/match
+github.com/gobwas/glob/syntax/ast
+github.com/gobwas/glob/util/runes
+github.com/gobwas/glob/syntax/lexer
+github.com/gobwas/glob/util/strings
+# github.com/gocolly/colly v1.2.0
+github.com/gocolly/colly
+github.com/gocolly/colly/debug
+github.com/gocolly/colly/storage
# github.com/gofrs/flock v0.7.1
github.com/gofrs/flock
# github.com/golang/protobuf v1.3.1
@@ -297,6 +322,8 @@ github.com/joyent/triton-go/client
github.com/json-iterator/go
# github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1
github.com/kardianos/osext
+# github.com/kennygrant/sanitize v1.2.4
+github.com/kennygrant/sanitize
# github.com/klauspost/compress v0.0.0-20160131094358-f86d2e6d8a77
github.com/klauspost/compress/flate
# github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817
@@ -387,6 +414,8 @@ github.com/renstrom/fuzzysearch/fuzzy
github.com/rwtodd/Go.Sed/sed
# github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735
github.com/ryanuber/go-glob
+# github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
+github.com/saintfish/chardet
# github.com/satori/go.uuid v1.2.0
github.com/satori/go.uuid
# github.com/scaleway/scaleway-cli v0.0.0-20180921094345-7b12c9699d70
@@ -397,6 +426,8 @@ github.com/scaleway/scaleway-cli/pkg/sshcommand
github.com/sirupsen/logrus
# github.com/stretchr/testify v1.3.0
github.com/stretchr/testify/assert
+# github.com/temoto/robotstxt v0.0.0-20180810133444-97ee4a9ee6ea
+github.com/temoto/robotstxt
# github.com/tencentcloud/tencentcloud-sdk-go v0.0.0-20181220135002-f1744d40d346
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile
@@ -508,13 +539,13 @@ golang.org/x/net/http2
golang.org/x/net/html/charset
golang.org/x/net/context
golang.org/x/net/trace
+golang.org/x/net/html
golang.org/x/net/internal/socks
golang.org/x/net/http/httpguts
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/context/ctxhttp
golang.org/x/net/publicsuffix
-golang.org/x/net/html
golang.org/x/net/internal/timeseries
golang.org/x/net/html/atom
# golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421
From 8e857d64f14adeea905adb4a24f27a89106a8f97 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:45:37 +0200
Subject: [PATCH 28/97] aws: rewrap struct comments for documentation
generation
---
builder/amazon/common/access_config.go | 56 ++++++++-------
builder/amazon/common/ami_config.go | 34 ++++-----
builder/amazon/common/block_device.go | 80 +++++++++++++++++-----
builder/amazon/common/run_config.go | 95 +++++++++++++++++++++++---
builder/amazon/ebs/builder.go | 9 ++-
builder/amazon/ebssurrogate/builder.go | 17 +++--
builder/amazon/ebsvolume/builder.go | 27 ++++----
7 files changed, 229 insertions(+), 89 deletions(-)
diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go
index 3dd5b2fa4..c1b9ed2fb 100644
--- a/builder/amazon/common/access_config.go
+++ b/builder/amazon/common/access_config.go
@@ -86,33 +86,39 @@ type AccessConfig struct {
// environmental variable.
Token string `mapstructure:"token" required:"false"`
session *session.Session
- // Get credentials from Hashicorp Vault's aws
- // secrets engine. You must already have created a role to use. For more
- // information about generating credentials via the Vault engine, see the
- // Vault
- // docs.
+ // Get credentials from Hashicorp Vault's aws secrets engine. You must
+ // already have created a role to use. For more information about
+ // generating credentials via the Vault engine, see the [Vault
+ // docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
// If you set this flag, you must also set the below options:
- // - `name` (string) - Required. Specifies the name of the role to generate
- // credentials against. This is part of the request URL.
- // - `engine_name` (string) - The name of the aws secrets engine. In the
- // Vault docs, this is normally referred to as "aws", and Packer will
- // default to "aws" if `engine_name` is not set.
- // - `role_arn` (string)- The ARN of the role to assume if credential\_type
- // on the Vault role is assumed\_role. Must match one of the allowed role
- // ARNs in the Vault role. Optional if the Vault role only allows a single
- // AWS role ARN; required otherwise.
- // - `ttl` (string) - Specifies the TTL for the use of the STS token. This
- // is specified as a string with a duration suffix. Valid only when
- // credential\_type is assumed\_role or federation\_token. When not
- // specified, the default\_sts\_ttl set for the role will be used. If that
- // is also not set, then the default value of 3600s will be used. AWS
- // places limits on the maximum TTL allowed. See the AWS documentation on
- // the DurationSeconds parameter for AssumeRole (for assumed\_role
- // credential types) and GetFederationToken (for federation\_token
- // credential types) for more details.
+ // - `name` (string) - Required. Specifies the name of the role to generate
+ // credentials against. This is part of the request URL.
+ // - `engine_name` (string) - The name of the aws secrets engine. In the
+ // Vault docs, this is normally referred to as "aws", and Packer will
+ // default to "aws" if `engine_name` is not set.
+ // - `role_arn` (string)- The ARN of the role to assume if credential\_type
+ // on the Vault role is assumed\_role. Must match one of the allowed role
+ // ARNs in the Vault role. Optional if the Vault role only allows a single
+ // AWS role ARN; required otherwise.
+ // - `ttl` (string) - Specifies the TTL for the use of the STS token. This
+ // is specified as a string with a duration suffix. Valid only when
+ // credential\_type is assumed\_role or federation\_token. When not
+ // specified, the default\_sts\_ttl set for the role will be used. If that
+ // is also not set, then the default value of 3600s will be used. AWS
+ // places limits on the maximum TTL allowed. See the AWS documentation on
+ // the DurationSeconds parameter for AssumeRole (for assumed\_role
+ // credential types) and GetFederationToken (for federation\_token
+ // credential types) for more details.
//
- // Example:
- // `json { "vault_aws_engine": { "name": "myrole", "role_arn": "myarn",
+ // ``` json
+ // {
+ // "vault_aws_engine": {
+ // "name": "myrole",
+ // "role_arn": "myarn",
+ // "ttl": "3600s"
+ // }
+ // }
+ // ```
VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
getEC2Connection func() ec2iface.EC2API
diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go
index efd919424..b43b0ed13 100644
--- a/builder/amazon/common/ami_config.go
+++ b/builder/amazon/common/ami_config.go
@@ -59,13 +59,12 @@ type AMIConfig struct {
// documentation on enabling enhanced
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
- // Enable enhanced networking (SriovNetSupport but
- // not ENA) on HVM-compatible AMIs. If true, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
- // sure enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- // Default false.
+ // Enable enhanced networking (SriovNetSupport but not ENA) on
+ // HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
+ // AWS IAM policy. Note: you must make sure enhanced networking is enabled
+ // on your instance. See [Amazon's documentation on enabling enhanced
+ // networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+ // Default `false`.
AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
// Force Packer to first deregister an existing
// AMI if one with the same name already exists. Default false.
@@ -87,20 +86,21 @@ type AMIConfig struct {
// This field is validated by Packer, when using an alias, you will have to
// prefix `kms_key_id` with `alias/`.
AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
- // a map of regions to copy the ami to, along with the custom kms key id
- // (alias or arn) to use for encryption for that region. Keys must match
- // the regions provided in ami_regions. If you just want to encrypt using a
- // default ID, you can stick with kms_key_id and ami_regions. If you want a
+ // regions to copy the ami to, along with the custom kms key id (alias or
+ // arn) to use for encryption for that region. Keys must match the regions
+ // provided in `ami_regions`. If you just want to encrypt using a default
+ // ID, you can stick with `kms_key_id` and `ami_regions`. If you want a
// region to be encrypted with that region's default key ID, you can use an
- // empty string "" instead of a key id in this map. (e.g. "us-east-1": "")
- // However, you cannot use default key IDs if you are using this in
- // conjunction with snapshot_users -- in that situation you must use custom
- // keys. For valid formats see KmsKeyId in the AWS API docs - CopyImage.
+ // empty string `""` instead of a key id in this map. (e.g. `"us-east-1":
+ // ""`) However, you cannot use default key IDs if you are using this in
+ // conjunction with `snapshot_users` -- in that situation you must use
+ // custom keys. For valid formats see *KmsKeyId* in the [AWS API docs -
+ // CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
// Tags to apply to snapshot.
// They will override AMI tags if already applied to snapshot. This is a
- // template engine, see Build template
- // data for more information.
+ // [template engine](../templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
SnapshotTags TagMap `mapstructure:"snapshot_tags" required:"false"`
// A list of account IDs that have
// access to create volumes from the snapshot(s). By default no additional
diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go
index 494a71e9c..39dabb60d 100644
--- a/builder/amazon/common/block_device.go
+++ b/builder/amazon/common/block_device.go
@@ -69,27 +69,75 @@ type BlockDevices struct {
}
type AMIBlockDevices struct {
- // Add one or
- // more block device
- // mappings
+ // Add one or more [block device
+ // mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
// to the AMI. These will be attached when booting a new instance from your
- // AMI. If this field is populated, and you are building from an existing source image,
- // the block device mappings in the source image will be overwritten. This means you
- // must have a block device mapping entry for your root volume, root_volume_size,
- // and root_device_name. `Your options here may vary depending on the type of VM
- // you use. The block device mappings allow for the following configuration:
+ // AMI. To add a block device during the Packer build see
+ // `launch_block_device_mappings` below. Your options here may vary
+ // depending on the type of VM you use. The block device mappings allow for
+ // the following configuration:
+ // - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
+ // deleted on instance termination. Default `false`. **NOTE**: If this
+ // value is not explicitly set to `true` and volumes are not cleaned up by
+ // an alternative method, additional volumes will accumulate after every
+ // build.
+ //
+ // - `device_name` (string) - The device name exposed to the instance (for
+ // example, `/dev/sdh` or `xvdh`). Required for every device in the block
+ // device mapping.
+ //
+ // - `encrypted` (boolean) - Indicates whether or not to encrypt the volume.
+ // By default, Packer will keep the encryption setting to what it was in
+ // the source image. Setting `false` will result in an unencrypted device,
+ // and `true` will result in an encrypted one.
+ //
+ // - `iops` (number) - The number of I/O operations per second (IOPS) that
+ // the volume supports. See the documentation on
+ // [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
+ // for more information
+ //
+ // - `kms_key_id` (string) - The ARN for the KMS encryption key. When
+ // specifying `kms_key_id`, `encrypted` needs to be set to `true`. For
+ // valid formats see *KmsKeyId* in the [AWS API docs -
+ // CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
+ //
+ // - `no_device` (boolean) - Suppresses the specified device included in the
+ // block device mapping of the AMI.
+ //
+ // - `snapshot_id` (string) - The ID of the snapshot.
+ //
+ // - `virtual_name` (string) - The virtual device name. See the
+ // documentation on [Block Device
+ // Mapping](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
+ // for more information.
+ //
+ // - `volume_size` (number) - The size of the volume, in GiB. Required if
+ // not specifying a `snapshot_id`.
+ //
+ // - `volume_type` (string) - The volume type. `gp2` for General Purpose
+ // (SSD) volumes, `io1` for Provisioned IOPS (SSD) volumes, `st1` for
+ // Throughput Optimized HDD, `sc1` for Cold HDD, and `standard` for
+ // Magnetic volumes.
AMIMappings []BlockDevice `mapstructure:"ami_block_device_mappings" required:"false"`
}
type LaunchBlockDevices struct {
- // Add one
- // or more block devices before the Packer build starts. If you add instance
- // store volumes or EBS volumes in addition to the root device volume, the
- // created AMI will contain block device mapping information for those
- // volumes. Amazon creates snapshots of the source instance's root volume and
- // any other EBS volumes described here. When you launch an instance from this
- // new AMI, the instance automatically launches with these additional volumes,
- // and will restore them from snapshots taken from the source instance.
+ // Add one or more block devices before the Packer build starts. If you add
+ // instance store volumes or EBS volumes in addition to the root device
+ // volume, the created AMI will contain block device mapping information
+ // for those volumes. Amazon creates snapshots of the source instance's
+ // root volume and any other EBS volumes described here. When you launch an
+ // instance from this new AMI, the instance automatically launches with
+ // these additional volumes, and will restore them from snapshots taken
+ // from the source instance.
+ //
+ // In addition to the fields available in ami_block_device_mappings, you
+ // may optionally use the following field:
+ // - `omit_from_artifact` (boolean) - If true, this block device will not
+ // be snapshotted and the created AMI will not contain block device mapping
+ // information for this volume. If false, the block device will be mapped
+ // into the final created AMI. Set this option to true if you need a block
+ // device mounted in the surrogate AMI but not in the final created AMI.
LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings" required:"false"`
}
diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index 11c1f0e14..0f038f0d6 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -149,14 +149,14 @@ type RunConfig struct {
// `security_group_ids`. Any filter described in the docs for
// [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// is valid.
-
+ //
// `security_group_ids` take precedence over this.
SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
- // Tags to apply to the instance
- // that is launched to create the AMI. These tags are not applied to the
- // resulting AMI unless they're duplicated in tags. This is a template
- // engine, see Build template
- // data for more information.
+ // Tags to apply to the instance that is *launched* to create the AMI.
+ // These tags are *not* applied to the resulting AMI unless they're
+ // duplicated in `tags`. This is a [template
+ // engine](/docs/templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
RunTags map[string]string `mapstructure:"run_tags" required:"false"`
// The ID (not the name) of the security
// group to assign to the instance. By default this is not set and Packer will
@@ -173,8 +173,47 @@ type RunConfig struct {
// AMI with a root volume snapshot that you have access to. Note: this is not
// used when from_scratch is set to true.
SourceAmi string `mapstructure:"source_ami" required:"true"`
- // Filters used to populate the source_ami
+ // Filters used to populate the `source_ami`
// field. Example:
+ //
+ // ``` json
+ // {
+ // "source_ami_filter": {
+ // "filters": {
+ // "virtualization-type": "hvm",
+ // "name": "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*",
+ // "root-device-type": "ebs"
+ // },
+ // "owners": ["099720109477"],
+ // "most_recent": true
+ // }
+ // }
+ // ```
+ //
+ // This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
+ // This will fail unless *exactly* one AMI is returned. In the above example,
+ // `most_recent` will cause this to succeed by selecting the newest image.
+ //
+ // - `filters` (map of strings) - filters used to select a `source_ami`.
+ // NOTE: This will fail unless *exactly* one AMI is returned. Any filter
+ // described in the docs for
+ // [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ // is valid.
+ //
+ // - `owners` (array of strings) - Filters the images by their owner. You
+ // may specify one or more AWS account IDs, "self" (which will use the
+ // account whose credentials you are using to run Packer), or an AWS owner
+ // alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
+ // option is required for security reasons.
+ //
+ // - `most_recent` (boolean) - Selects the newest created image when true.
+ // This is most useful for selecting a daily distro build.
+ //
+ // You may set this in place of `source_ami` or in conjunction with it. If you
+ // set this in conjunction with `source_ami`, the `source_ami` will be added
+ // to the filter. The provided `source_ami` must meet all of the filtering
+ // criteria provided in `source_ami_filter`; this pins the AMI returned by the
+ // filter, but will cause Packer to fail if the `source_ami` does not exist.
SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
// a list of acceptable instance
// types to run your build on. We will request a spot instance using the max
@@ -203,8 +242,39 @@ type RunConfig struct {
// Requires spot_price to be
// set. This tells Packer to apply tags to the spot request that is issued.
SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
- // Filters used to populate the subnet_id field.
+ // Filters used to populate the `subnet_id` field.
// Example:
+ //
+ // ``` json
+ // {
+ // "subnet_filter": {
+ // "filters": {
+ // "tag:Class": "build"
+ // },
+ // "most_free": true,
+ // "random": false
+ // }
+ // }
+ // ```
+ //
+ // This selects the Subnet with tag `Class` with the value `build`, which has
+ // the most free IP addresses. NOTE: This will fail unless *exactly* one
+ // Subnet is returned. By using `most_free` or `random` one will be selected
+ // from those matching the filter.
+ //
+ // - `filters` (map of strings) - filters used to select a `subnet_id`.
+ // NOTE: This will fail unless *exactly* one Subnet is returned. Any
+ // filter described in the docs for
+ // [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
+ // is valid.
+ //
+ // - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
+ // will be used if multiple Subnets match the filter.
+ //
+ // - `random` (boolean) - A random Subnet will be used if multiple Subnets
+ // match the filter. `most_free` has precedence over this.
+ //
+ // `subnet_id` takes precedence over this.
SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
// If using VPC, the ID of the subnet, such as
// subnet-12345def, where Packer will launch the EC2 instance. This field is
@@ -212,10 +282,13 @@ type RunConfig struct {
SubnetId string `mapstructure:"subnet_id" required:"false"`
// The name of the temporary key pair to
// generate. By default, Packer generates a name that looks like
- // packer_, where is a 36 character unique identifier.
+ // `packer_<UUID>`, where <UUID> is a 36 character unique identifier.
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" required:"false"`
- // A list of IPv4
- // CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
+ // A list of IPv4 CIDR blocks to be authorized access to the instance, when
+ // packer is creating a temporary security group.
+ //
+ // The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only
+ // used when `security_group_id` or `security_group_ids` is not specified.
TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
// User data to apply when launching the instance. Note
// that you need to be careful about escaping characters due to the templates
diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go
index d847f9d57..4554e3070 100644
--- a/builder/amazon/ebs/builder.go
+++ b/builder/amazon/ebs/builder.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
// The amazonebs package contains a packer.Builder implementation that
// builds AMIs for Amazon EC2.
//
@@ -28,7 +30,12 @@ type Config struct {
awscommon.AMIConfig `mapstructure:",squash"`
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.RunConfig `mapstructure:",squash"`
- VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
+ // Tags to apply to the volumes that are *launched* to create the AMI.
+ // These tags are *not* applied to the resulting AMI unless they're
+ // duplicated in `tags`. This is a [template
+ // engine](/docs/templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
+ VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
ctx interpolate.Context
}
diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go
index 92ca1a76f..e48f5bdf3 100644
--- a/builder/amazon/ebssurrogate/builder.go
+++ b/builder/amazon/ebssurrogate/builder.go
@@ -27,10 +27,19 @@ type Config struct {
awscommon.RunConfig `mapstructure:",squash"`
awscommon.BlockDevices `mapstructure:",squash"`
awscommon.AMIConfig `mapstructure:",squash"`
- // A block device mapping
- // describing the root device of the AMI. This looks like the mappings in
- // ami_block_device_mapping, except with an additional field:
- RootDevice RootBlockDevice `mapstructure:"ami_root_device" required:"true"`
+ // A block device mapping describing the root device of the AMI. This looks
+ // like the mappings in `ami_block_device_mapping`, except with an
+ // additional field:
+ //
+ // - `source_device_name` (string) - The device name of the block device on
+ // the source instance to be used as the root device for the AMI. This
+ // must correspond to a block device in `launch_block_device_mapping`.
+ RootDevice RootBlockDevice `mapstructure:"ami_root_device" required:"true"`
+ // Tags to apply to the volumes that are *launched* to create the AMI.
+ // These tags are *not* applied to the resulting AMI unless they're
+ // duplicated in `tags`. This is a [template
+ // engine](/docs/templates/engine.html), see [Build template
+ // data](#build-template-data) for more information.
VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
// what architecture to use when registering the
// final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
diff --git a/builder/amazon/ebsvolume/builder.go b/builder/amazon/ebsvolume/builder.go
index 3556dcc61..b715f2726 100644
--- a/builder/amazon/ebsvolume/builder.go
+++ b/builder/amazon/ebsvolume/builder.go
@@ -27,22 +27,19 @@ type Config struct {
// Add the block device
// mappings to the AMI. The block device mappings allow for keys:
VolumeMappings []BlockDevice `mapstructure:"ebs_volumes" required:"false"`
- // Enable enhanced networking (ENA but not
- // SriovNetSupport) on HVM-compatible AMIs. If set, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. If false, this will
- // disable enhanced networking in the final AMI as opposed to passing the
- // setting through unchanged from the source. Note: you must make sure
- // enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
+ // Enable enhanced networking (ENA but not SriovNetSupport) on
+ // HVM-compatible AMIs. If set, add ec2:ModifyInstanceAttribute to your AWS
+ // IAM policy. If false, this will disable enhanced networking in the final
+ // AMI as opposed to passing the setting through unchanged from the source.
+ // Note: you must make sure enhanced networking is enabled on your
+ // instance. See Amazon's documentation on enabling enhanced networking.
AMIENASupport *bool `mapstructure:"ena_support" required:"false"`
- // Enable enhanced networking (SriovNetSupport but
- // not ENA) on HVM-compatible AMIs. If true, add
- // ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make
- // sure enhanced networking is enabled on your instance. See Amazon's
- // documentation on enabling enhanced
- // networking.
- // Default false.
+ // Enable enhanced networking (SriovNetSupport but not ENA) on
+ // HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
+ // AWS IAM policy. Note: you must make sure enhanced networking is enabled
+ // on your instance. See [Amazon's documentation on enabling enhanced
+ // networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
+ // Default `false`.
AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
launchBlockDevices awscommon.BlockDevices
From d81ca5728d6056f8ac8db8359779eef8ada63537 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:45:44 +0200
Subject: [PATCH 29/97] azure-arm: rewrap struct comments for documentation
generation
---
builder/azure/arm/config.go | 142 ++++++++++++++++++++++++++----------
1 file changed, 102 insertions(+), 40 deletions(-)
diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go
index e28d046d9..d577584cf 100644
--- a/builder/azure/arm/config.go
+++ b/builder/azure/arm/config.go
@@ -89,53 +89,76 @@ type Config struct {
// Capture
CaptureNamePrefix string `mapstructure:"capture_name_prefix"`
CaptureContainerName string `mapstructure:"capture_container_name"`
- // Use a Shared Gallery
- // image
- // as the source for this build. VHD targets are incompatible with this build
- // type - the target must be a Managed Image.
+ // Use a [Shared Gallery
+ // image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
+ // as the source for this build. *VHD targets are incompatible with this
+ // build type* - the target must be a *Managed Image*.
+ //
+ // "shared_image_gallery": {
+ // "subscription": "00000000-0000-0000-0000-00000000000",
+ // "resource_group": "ResourceGroup",
+ // "gallery_name": "GalleryName",
+ // "image_name": "ImageName",
+ // "image_version": "1.0.0"
+ // }
+ // "managed_image_name": "TargetImageName",
+ // "managed_image_resource_group_name": "TargetResourceGroup"
SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery" required:"false"`
// PublisherName for your base image. See
- // documentation
+ // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
+ //
+ // CLI example `az vm image list-publishers --location westus`
ImagePublisher string `mapstructure:"image_publisher" required:"true"`
// Offer for your base image. See
- // documentation
+ // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
+ //
+ // CLI example
+ // `az vm image list-offers --location westus --publisher Canonical`
ImageOffer string `mapstructure:"image_offer" required:"true"`
// SKU for your base image. See
- // documentation
+ // [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
+ //
+ // CLI example
+ // `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer`
ImageSku string `mapstructure:"image_sku" required:"true"`
// Specify a specific version of an OS to boot from.
- // Defaults to latest. There may be a difference in versions available
+ // Defaults to `latest`. There may be a difference in versions available
// across regions due to image synchronization latency. To ensure a consistent
// version across regions set this value to one that is available in all
// regions where you are deploying.
+ //
+ // CLI example
+ // `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
ImageVersion string `mapstructure:"image_version" required:"false"`
// Specify a custom VHD to use. If this value is set, do
// not set image_publisher, image_offer, image_sku, or image_version.
ImageUrl string `mapstructure:"image_url" required:"false"`
- // Specify the source
- // managed image's resource group used to use. If this value is set, do not
- // set image_publisher, image_offer, image_sku, or image_version. If this
- // value is set, the value custom_managed_image_name must also be set. See
- // documentation
+ // Specify the source managed image's resource group to use. If this
+ // value is set, do not set image\_publisher, image\_offer, image\_sku, or
+ // image\_version. If this value is set, the value
+ // `custom_managed_image_name` must also be set. See
+ // [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
// to learn more about managed images.
CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"false"`
- // Specify the source managed image's
- // name to use. If this value is set, do not set image_publisher,
- // image_offer, image_sku, or image_version. If this value is set, the
- // value custom_managed_image_resource_group_name must also be set. See
- // documentation
+ // Specify the source managed image's name to use. If this value is set, do
+ // not set image\_publisher, image\_offer, image\_sku, or image\_version.
+ // If this value is set, the value
+ // `custom_managed_image_resource_group_name` must also be set. See
+ // [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
// to learn more about managed images.
CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"false"`
customManagedImageID string
Location string `mapstructure:"location"`
- // Size of the VM used for building. This can be changed
- // when you deploy a VM from your VHD. See
- // pricing
- // information. Defaults to Standard_A1.
+ // Size of the VM used for building. This can be changed when you deploy a
+ // VM from your VHD. See
+ // [pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
+ // information. Defaults to `Standard_A1`.
+ //
+ // CLI example `az vm list-sizes --location westus`
VMSize string `mapstructure:"vm_size" required:"false"`
ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
@@ -155,8 +178,9 @@ type Config struct {
// captured.
ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix" required:"false"`
manageImageLocation string
- // Store the image in zone-resilient storage. You need to create it
- // in a region that supports availability zones.
+ // Store the image in zone-resilient storage. You need to create it in a
+ // region that supports [availability
+ // zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
// the user can define up to 15
// tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
@@ -195,20 +219,48 @@ type Config struct {
// containing the virtual network. If the resource group cannot be found, or
// it cannot be disambiguated, this value should be set.
VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
- // Specify a file containing custom data to inject
- // into the cloud-init process. The contents of the file are read and injected
- // into the ARM template. The custom data will be passed to cloud-init for
- // processing at the time of provisioning. See
- // documentation
+ // Specify a file containing custom data to inject into the cloud-init
+ // process. The contents of the file are read and injected into the ARM
+ // template. The custom data will be passed to cloud-init for processing at
+ // the time of provisioning. See
+ // [documentation](http://cloudinit.readthedocs.io/en/latest/topics/examples.html)
// to learn more about custom data, and how it can be used to influence the
// provisioning process.
CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
customData string
- // Used for creating images from Marketplace images.
- // Please refer to Deploy an image with Marketplace
- // terms for more details. Not
- // all Marketplace images support programmatic deployment, and support is
- // controlled by the image publisher.
+ // Used for creating images from Marketplace images. Please refer to
+ // [Deploy an image with Marketplace
+ // terms](https://aka.ms/azuremarketplaceapideployment) for more details.
+ // Not all Marketplace images support programmatic deployment, and support
+ // is controlled by the image publisher.
+ //
+ // An example plan\_info object is defined below.
+ //
+ // ``` json
+ // {
+ // "plan_info": {
+ // "plan_name": "rabbitmq",
+ // "plan_product": "rabbitmq",
+ // "plan_publisher": "bitnami"
+ // }
+ // }
+ // ```
+ //
+ // `plan_name` (string) - The plan name, required. `plan_product` (string) -
+ // The plan product, required. `plan_publisher` (string) - The plan publisher,
+ // required. `plan_promotion_code` (string) - Some images accept a promotion
+ // code, optional.
+ //
+ // Images created from the Marketplace with `plan_info` **must** specify
+ // `plan_info` whenever the image is deployed. The builder automatically adds
+ // tags to the image to ensure this information is not lost. The following
+ // tags are added.
+ //
+ // 1. PlanName
+ // 2. PlanProduct
+ // 3. PlanPublisher
+ // 4. PlanPromotionCode
+ //
PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
// If either Linux or Windows is specified Packer will
// automatically configure authentication credentials for the provisioned
@@ -218,14 +270,24 @@ type Config struct {
// Specify the size of the OS disk in GB
// (gigabytes). Values of zero or less than zero are ignored.
OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
- // The size(s) of any additional
- // hard disks for the VM in gigabytes. If this is not specified then the VM
- // will only contain an OS disk. The number of additional disks and maximum
- // size of a disk depends on the configuration of your VM. See
- // Windows
+ // The size(s) of any additional hard disks for the VM in gigabytes. If
+ // this is not specified then the VM will only contain an OS disk. The
+ // number of additional disks and maximum size of a disk depends on the
+ // configuration of your VM. See
+ // [Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds)
// or
- // Linux
+ // [Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds)
// for more information.
+ //
+ // For VHD builds the final artifacts will be named
+ // `PREFIX-dataDisk-<n>.UUID.vhd` and stored in the specified capture
+ // container along side the OS disk. The additional disks are included in
+ // the deployment template `PREFIX-vmTemplate.UUID`.
+ //
+ // For managed builds the final artifacts are included in the managed image.
+ // The additional disk will have the same storage account type as the OS
+ // disk, as specified with the `managed_image_storage_account_type`
+ // setting.
AdditionalDiskSize []int32 `mapstructure:"disk_additional_size" required:"false"`
// Specify the disk caching type. Valid values
// are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
From 696fce973921d9e74b78ea5758c30b65dc6a6c9a Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:45:50 +0200
Subject: [PATCH 30/97] docker: rewrap struct comments for documentation
generation
---
builder/docker/config.go | 107 ++++++++++++++++++++++-----------------
1 file changed, 60 insertions(+), 47 deletions(-)
diff --git a/builder/docker/config.go b/builder/docker/config.go
index 20feb885b..b37d63d0c 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -26,67 +26,80 @@ type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
- Author string
- Changes []string
- Commit bool
- // The directory inside container to mount temp
- // directory from host server for work file
- // provisioner. This defaults to
- // c:/packer-files on windows and /packer-files on other systems.
+ // Set the author (e-mail) of a commit.
+ Author string `mapstructure:"author"`
+ // Dockerfile instructions to add to the commit. Examples of instructions
+ // are CMD, ENTRYPOINT, ENV, and EXPOSE. Example: [ "USER ubuntu", "WORKDIR
+ // /app", "EXPOSE 8080" ]
+ Changes []string `mapstructure:"changes"`
+ // If true, the container will be committed to an image rather than exported.
+ Commit bool `mapstructure:"commit" required:"true"`
+
+ // The directory inside container to mount temp directory from host server
+ // for work file provisioner. This defaults to c:/packer-files on windows
+ // and /packer-files on other systems.
ContainerDir string `mapstructure:"container_dir" required:"false"`
- Discard bool
- // Username (UID) to run remote commands with. You can
- // also set the group name/ID if you want: (UID or UID:GID).
- // You may need this if you get permission errors trying to run the shell or
- // other provisioners.
- ExecUser string `mapstructure:"exec_user" required:"false"`
- ExportPath string `mapstructure:"export_path"`
- Image string
- Message string
- // If true, run the docker container with the
- // --privileged flag. This defaults to false if not set.
+ // Throw away the container when the build is complete. This is useful for
+ // the [artifice
+ // post-processor](https://www.packer.io/docs/post-processors/artifice.html).
+ Discard bool `mapstructure:"discard" required:"true"`
+ // Username (UID) to run remote commands with. You can also set the group
+ // name/ID if you want: (UID or UID:GID). You may need this if you get
+ // permission errors trying to run the shell or other provisioners.
+ ExecUser string `mapstructure:"exec_user" required:"false"`
+ // The path where the final container will be exported as a tar file.
+ ExportPath string `mapstructure:"export_path" required:"true"`
+ // The base image for the Docker container that will be started. This image
+ // will be pulled from the Docker registry if it doesn't already exist.
+ Image string `mapstructure:"image" required:"true"`
+ // Set a message for the commit.
+ Message string `mapstructure:"message" required:"true"`
+ // If true, run the docker container with the `--privileged` flag. This
+ // defaults to false if not set.
Privileged bool `mapstructure:"privileged" required:"false"`
Pty bool
- Pull bool
- // An array of arguments to pass to
- // docker run in order to run the container. By default this is set to
- // ["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are
- // using a linux container, and
- // ["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"] if you
- // are running a windows container. {{.Image}} is a template variable that
- // corresponds to the image template option. Passing the entrypoint option
- // this way will make it the default entrypoint of the resulting image, so
- // running docker run -it --rm will start the docker image from the
- // /bin/sh shell interpreter; you could run a script or another shell by
- // running docker run -it --rm -c /bin/bash. If your docker image
- // embeds a binary intended to be run often, you should consider changing the
- // default entrypoint to point to it.
+ // If true, the configured image will be pulled using `docker pull` prior
+ // to use. Otherwise, it is assumed the image already exists and can be
+ // used. This defaults to true if not set.
+ Pull bool `mapstructure:"pull" required:"false"`
+ // An array of arguments to pass to docker run in order to run the
+ // container. By default this is set to ["-d", "-i", "-t",
+ // "--entrypoint=/bin/sh", "--", "{{.Image}}"] if you are using a linux
+ // container, and ["-d", "-i", "-t", "--entrypoint=powershell", "--",
+ // "{{.Image}}"] if you are running a windows container. {{.Image}} is a
+ // template variable that corresponds to the image template option. Passing
+ // the entrypoint option this way will make it the default entrypoint of
+ // the resulting image, so running docker run -it --rm will start the
+ // docker image from the /bin/sh shell interpreter; you could run a script
+ // or another shell by running docker run -it --rm -c /bin/bash. If your
+ // docker image embeds a binary intended to be run often, you should
+ // consider changing the default entrypoint to point to it.
RunCommand []string `mapstructure:"run_command" required:"false"`
- Volumes map[string]string
- // If true, files uploaded to the container
- // will be owned by the user the container is running as. If false, the owner
- // will depend on the version of docker installed in the system. Defaults to
- // true.
+ // A mapping of additional volumes to mount into this container. The key of
+ // the object is the host path, the value is the container path.
+ Volumes map[string]string `mapstructure:"volumes" required:"false"`
+ // If true, files uploaded to the container will be owned by the user the
+ // container is running as. If false, the owner will depend on the version
+ // of docker installed in the system. Defaults to true.
FixUploadOwner bool `mapstructure:"fix_upload_owner" required:"false"`
- // If "true", tells Packer that you are building a
- // Windows container running on a windows host. This is necessary for building
- // Windows containers, because our normal docker bindings do not work for them.
+ // If "true", tells Packer that you are building a Windows container
+ // running on a windows host. This is necessary for building Windows
+ // containers, because our normal docker bindings do not work for them.
WindowsContainer bool `mapstructure:"windows_container" required:"false"`
// This is used to login to dockerhub to pull a private base container. For
// pushing to dockerhub, see the docker post-processors
- Login bool
+ Login bool `mapstructure:"login" required:"false"`
// The password to use to authenticate to login.
LoginPassword string `mapstructure:"login_password" required:"false"`
// The server address to login to.
LoginServer string `mapstructure:"login_server" required:"false"`
// The username to use to authenticate to login.
LoginUsername string `mapstructure:"login_username" required:"false"`
- // Defaults to false. If true, the builder will login
- // in order to pull the image from Amazon EC2 Container Registry
- // (ECR). The builder only logs in for the
- // duration of the pull. If true login_server is required and login,
- // login_username, and login_password will be ignored. For more
+ // Defaults to false. If true, the builder will login in order to pull the
+ // image from Amazon EC2 Container Registry (ECR). The builder only logs in
+ // for the duration of the pull. If true login_server is required and
+ // login, login_username, and login_password will be ignored. For more
// information see the section on ECR.
EcrLogin bool `mapstructure:"ecr_login" required:"false"`
AwsAccessConfig `mapstructure:",squash"`
@@ -125,7 +138,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
// Default Pull if it wasn't set
hasPull := false
for _, k := range md.Keys {
- if k == "Pull" {
+ if k == "pull" {
hasPull = true
break
}
From 8df433be470c18028c49b5994d4070a02bde2996 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:46:12 +0200
Subject: [PATCH 31/97] aws: use auto-generated partials
---
.../docs/builders/amazon-chroot.html.md.erb | 312 +----------
.../docs/builders/amazon-ebs.html.md.erb | 487 +----------------
.../builders/amazon-ebssurrogate.html.md.erb | 488 +----------------
.../builders/amazon-ebsvolume.html.md.erb | 481 +----------------
.../docs/builders/amazon-instance.html.md.erb | 490 +-----------------
5 files changed, 42 insertions(+), 2216 deletions(-)
diff --git a/website/source/docs/builders/amazon-chroot.html.md.erb b/website/source/docs/builders/amazon-chroot.html.md.erb
index 25d958956..3fc819bf0 100644
--- a/website/source/docs/builders/amazon-chroot.html.md.erb
+++ b/website/source/docs/builders/amazon-chroot.html.md.erb
@@ -57,315 +57,15 @@ each category, the available configuration keys are alphabetized.
### Required:
-- `access_key` (string) - The access key used to communicate with AWS. [Learn
- how to set this](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `ami_name` (string) - The name of the resulting AMI that will appear when
- managing AMIs in the AWS console or via APIs. This must be unique. To help
- make this unique, use a function like `timestamp` (see [template
- engine](/docs/templates/engine.html) for more info).
-
-- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
- how to set this](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `source_ami` (string) - The source AMI whose root volume will be copied and
- provisioned on the currently running instance. This must be an EBS-backed
- AMI with a root volume snapshot that you have access to. Note: this is not
- used when `from_scratch` is set to `true`.
+<%= partial "partials/builder/amazon/common/AccessConfig-required" %>
+<%= partial "partials/builder/amazon/chroot/Config-required" %>
### Optional:
-- `ami_architecture` (string) - what architecture to use when registering the
- final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
-
-- `ami_description` (string) - The description to set for the resulting
- AMI(s). By default this description is empty. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `ami_groups` (array of strings) - A list of groups that have access to
- launch the resulting AMI(s). By default no groups have permission to launch
- the AMI. `all` will make the AMI publicly accessible.
-
-- `ami_product_codes` (array of strings) - A list of product codes to
- associate with the AMI. By default no product codes are associated with the
- AMI.
-
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
- Tags and attributes are copied along with the AMI. AMI copying takes time
- depending on the size of the AMI, but will generally take many minutes.
-
-- `ami_users` (array of strings) - A list of account IDs that have access to
- launch the resulting AMI(s). By default no additional users other than the
- user creating the AMI has permissions to launch it.
-
-- `ami_virtualization_type` (string) - The type of virtualization for the AMI
- you are building. This option is required to register HVM images. Can be
- `paravirtual` (default) or `hvm`.
-
-- `chroot_mounts` (array of array of strings) - This is a list of devices to
- mount into the chroot environment. This configuration parameter requires
- some additional documentation which is in the [Chroot
- Mounts](#Chroot%20Mounts) section. Please read that section for more
- information on how to use this.
-
-- `command_wrapper` (string) - How to run shell commands. This defaults to
- `{{.Command}}`. This may be useful to set if you want to set environmental
- variables or perhaps run it with `sudo` or so on. This is a configuration
- template where the `.Command` variable is replaced with the command to be
- run. Defaults to `{{.Command}}`.
-
-- `copy_files` (array of strings) - Paths to files on the running EC2
- instance that will be copied into the chroot environment prior to
- provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass
- an empty list to skip copying `/etc/resolv.conf`. You may need to do this
- if you're building an image that uses systemd.
-
-- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
- provider whose API is compatible with aws EC2. Specify another endpoint
- like this `https://ec2.custom.endpoint.com`.
-
-- `decode_authorization_messages` (boolean) - Enable automatic decoding of
- any encoded authorization (error) messages using the
- `sts:DecodeAuthorizationMessage` API. Note: requires that the effective
- user/role have permissions to `sts:DecodeAuthorizationMessage` on resource
- `*`. Default `false`.
-
-- `device_path` (string) - The path to the device where the root volume of
- the source AMI will be attached. This defaults to "" (empty string), which
- forces Packer to find an open device automatically.
-
-- `ena_support` (boolean) - Enable enhanced networking (ENA but not
- SriovNetSupport) on HVM-compatible AMIs. If set, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. If false, this will
- disable enhanced networking in the final AMI as opposed to passing the
- setting through unchanged from the source. Note: you must make sure
- enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
-
-- `encrypt_boot` (boolean) - Whether or not to encrypt the resulting AMI when
- copying a provisioned instance to an AMI. By default, Packer will keep the
- encryption setting to what it was in the source image. Setting `false` will
- result in an unencrypted image, and `true` will result in an encrypted one.
-
-- `force_deregister` (boolean) - Force Packer to first deregister an existing
- AMI if one with the same name already exists. Default `false`.
-
-- `force_delete_snapshot` (boolean) - Force Packer to delete snapshots
- associated with AMIs, which have been deregistered by `force_deregister`.
- Default `false`.
-
-- `insecure_skip_tls_verify` (boolean) - This allows skipping TLS
- verification of the AWS EC2 endpoint. The default is `false`.
-
-- `kms_key_id` (string) - ID, alias or ARN of the KMS key to use for boot
- volume encryption. This only applies to the main `region`, other regions
- where the AMI will be copied will be encrypted by the default EBS KMS key.
- For valid formats see *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
- This field is validated by Packer, when using an alias, you will have to
- prefix `kms_key_id` with `alias/`.
-
-- `from_scratch` (boolean) - Build a new volume instead of starting from an
- existing AMI root volume snapshot. Default `false`. If `true`, `source_ami`
- is no longer used and the following options become required:
- `ami_virtualization_type`, `pre_mount_commands` and `root_volume_size`. The
- below options are also required in this mode only:
-
-- `ami_block_device_mappings` (array of block device mappings) - Add one or
- more [block device
- mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
- to the AMI. These will be attached when booting a new instance from your
- AMI. If this field is populated, and you are building from an existing source image,
- the block device mappings in the source image will be overwritten. This means you
- must have a block device mapping entry for your root volume, `root_volume_size `,
- and `root_device_name`. `Your options here may vary depending on the type of VM
- you use. The block device mappings allow for the following configuration:
-
- <%= partial "partials/builders/aws-common-block-device-a-i" %>
-
- - `kms_key_id` (string) - The ARN for the KMS encryption key. When
- specifying `kms_key_id`, `encrypted` needs to be set to `true`. For
- valid formats see *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
- <%= partial "partials/builders/aws-common-block-device-i-v" %>
-
-- `region_kms_key_ids` (map of strings) - a map of regions to copy the ami
- to, along with the custom kms key id (alias or arn) to use for encryption
- for that region. Keys must match the regions provided in `ami_regions`. If
- you just want to encrypt using a default ID, you can stick with
- `kms_key_id` and `ami_regions`. If you want a region to be encrypted with
- that region's default key ID, you can use an empty string `""` instead of a
- key id in this map. (e.g. `"us-east-1": ""`) However, you cannot use
- default key IDs if you are using this in conjunction with `snapshot_users`
- -- in that situation you must use custom keys. For valid formats see
- *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
-- `root_device_name` (string) - The root device name. For example, `xvda`.
-
-- `mfa_code` (string) - The MFA
- [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- code. This should probably be a user variable since it changes all the
- time.
-
-- `mount_path` (string) - The path where the volume will be mounted. This is
- where the chroot environment will be. This defaults to
- `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration
- template where the `.Device` variable is replaced with the name of the
- device where the volume is attached.
-
-- `mount_partition` (string) - The partition number containing the /
- partition. By default this is the first partition of the volume, (for
- example, `xvda1`) but you can designate the entire block device by setting
- `"mount_partition": "0"` in your config, which will mount `xvda` instead.
-
-- `mount_options` (array of strings) - Options to supply the `mount` command
- when mounting devices. Each option will be prefixed with `-o` and supplied
- to the `mount` command ran by Packer. Because this command is ran in a
- shell, user discretion is advised. See [this manual page for the mount
- command](http://linuxcommand.org/man_pages/mount8.html) for valid file
- system specific options.
-
-- `nvme_device_path` (string) - When we call the mount command (by default
- `mount -o device dir`), the string provided in `nvme_mount_path` will
- replace `device` in that command. When this option is not set, `device` in
- that command will be something like `/dev/sdf1`, mirroring the attached
- device name. This assumption works for most instances but will fail with c5
- and m5 instances. In order to use the chroot builder with c5 and m5
- instances, you must manually set `nvme_device_path` and `device_path`.
-
-- `pre_mount_commands` (array of strings) - A series of commands to execute
- after attaching the root volume and before mounting the chroot. This is not
- required unless using `from_scratch`. If so, this should include any
- partitioning and filesystem creation commands. The path to the device is
- provided by `{{.Device}}`.
-
-- `profile` (string) - The profile to use in the shared credentials file for
- AWS. See Amazon's documentation on [specifying
- profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
- for more details.
-
-- `post_mount_commands` (array of strings) - As `pre_mount_commands`, but the
- commands are executed after mounting the root device and before the extra
- mount and copy steps. The device and mount path are provided by
- `{{.Device}}` and `{{.MountPath}}`.
-
-- `root_volume_size` (number) - The size of the root volume in GB for the
- chroot environment and the resulting AMI. Default size is the snapshot size
- of the `source_ami` unless `from_scratch` is `true`, in which case this
- field must be defined.
-
-- `root_volume_type` (string) - The type of EBS volume for the chroot
- environment and resulting AMI. The default value is the type of the
- `source_ami`, unless `from_scratch` is `true`, in which case the default
- value is `gp2`. You can only specify `io1` if building based on top of a
- `source_ami` which is also `io1`.
-
-- `root_volume_tags` (object of key/value strings) - Tags to apply to the
- volumes that are *launched*. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `skip_region_validation` (boolean) - Set to `true` if you want to skip
- validation of the `ami_regions` configuration option. Default `false`.
-
-- `snapshot_tags` (object of key/value strings) - Tags to apply to snapshot.
- They will override AMI tags if already applied to snapshot. This is a
- [template engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `snapshot_groups` (array of strings) - A list of groups that have access to
- create volumes from the snapshot(s). By default no groups have permission
- to create volumes from the snapshot(s). `all` will make the snapshot
- publicly accessible.
-
-- `snapshot_users` (array of strings) - A list of account IDs that have
- access to create volumes from the snapshot(s). By default no additional
- users other than the user creating the AMI has permissions to create
- volumes from the backing snapshot(s).
-
-- `source_ami_filter` (object) - Filters used to populate the `source_ami`
- field. Example:
-
- ``` json
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- ```
-
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
-
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
-
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, "amazon", "aws-marketplace", or "microsoft". This
- option is required for security reasons.
-
- - `most_recent` (boolean) - Selects the newest created image when `true`.
- This is most useful for selecting a daily distro build.
-
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
-
-- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
- not ENA) on HVM-compatible AMIs. If `true`, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. Note: you must make
- sure enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
- Default `false`.
-
-- `tags` (object of key/value strings) - Tags applied to the AMI. This is a
- [template engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `vault_aws_engine` (object) - Get credentials from Hashicorp Vault's aws
- secrets engine. You must already have created a role to use. For more
- information about generating credentials via the Vault engine, see the
- [Vault
- docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
- If you set this flag, you must also set the below options:
- - `name` (string) - Required. Specifies the name of the role to generate
- credentials against. This is part of the request URL.
- - `engine_name` (string) - The name of the aws secrets engine. In the
- Vault docs, this is normally referred to as "aws", and Packer will
- default to "aws" if `engine_name` is not set.
- - `role_arn` (string)- The ARN of the role to assume if credential\_type
- on the Vault role is assumed\_role. Must match one of the allowed role
- ARNs in the Vault role. Optional if the Vault role only allows a single
- AWS role ARN; required otherwise.
- - `ttl` (string) - Specifies the TTL for the use of the STS token. This
- is specified as a string with a duration suffix. Valid only when
- credential\_type is assumed\_role or federation\_token. When not
- specified, the default\_sts\_ttl set for the role will be used. If that
- is also not set, then the default value of 3600s will be used. AWS
- places limits on the maximum TTL allowed. See the AWS documentation on
- the DurationSeconds parameter for AssumeRole (for assumed\_role
- credential types) and GetFederationToken (for federation\_token
- credential types) for more details.
-
- Example:
- `json { "vault_aws_engine": { "name": "myrole", "role_arn": "myarn", "ttl": "3600s" } }`
+<%= partial "partials/builder/amazon/chroot/Config-not-required" %>
+<%= partial "partials/builder/amazon/common/AccessConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIBlockDevices-not-required" %>
## Basic Example
diff --git a/website/source/docs/builders/amazon-ebs.html.md.erb b/website/source/docs/builders/amazon-ebs.html.md.erb
index 9ec0c496e..3fd837c45 100644
--- a/website/source/docs/builders/amazon-ebs.html.md.erb
+++ b/website/source/docs/builders/amazon-ebs.html.md.erb
@@ -45,487 +45,18 @@ builder.
### Required:
-- `access_key` (string) - The access key used to communicate with AWS. [Learn
- how to set this](amazon.html#specifying-amazon-credentials). This is not
- required if you are using `use_vault_aws_engine` for authentication
- instead.
-
-- `ami_name` (string) - The name of the resulting AMI that will appear when
- managing AMIs in the AWS console or via APIs. This must be unique. To help
- make this unique, use a function like `timestamp` (see [template
- engine](../templates/engine.html) for more info).
-
-- `instance_type` (string) - The EC2 instance type to use while building the
- AMI, such as `t2.small`.
-
-- `region` (string) - The name of the region, such as `us-east-1`, in which
- to launch the EC2 instance to create the AMI.
-
-- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
- how to set this](amazon.html#specifying-amazon-credentials). This is not
- required if you are using `use_vault_aws_engine` for authentication
- instead.
-
-- `source_ami` (string) - The initial AMI used as a base for the newly
- created machine. `source_ami_filter` may be used instead to populate this
- automatically.
+<%= partial "partials/builder/amazon/common/AccessConfig-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-required" %>
### Optional:
-- `ami_block_device_mappings` (array of block device mappings) - Add one or
- more [block device
- mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
- to the AMI. These will be attached when booting a new instance from your
- AMI. To add a block device during the Packer build see
- `launch_block_device_mappings` below. Your options here may vary depending
- on the type of VM you use. The block device mappings allow for the
- following configuration:
-
- <%= partial "partials/builders/aws-common-block-device-a-i" %>
-
- <%= partial "partials/builders/aws-common-block-device-i-v" %>
-
-- `ami_description` (string) - The description to set for the resulting
- AMI(s). By default this description is empty. This is a [template
- engine](../templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `ami_groups` (array of strings) - A list of groups that have access to
- launch the resulting AMI(s). By default no groups have permission to launch
- the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
- accept any value other than `all`.
-
-- `ami_product_codes` (array of strings) - A list of product codes to
- associate with the AMI. By default no product codes are associated with the
- AMI.
-
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
- Tags and attributes are copied along with the AMI. AMI copying takes time
- depending on the size of the AMI, but will generally take many minutes.
-
-- `ami_users` (array of strings) - A list of account IDs that have access to
- launch the resulting AMI(s). By default no additional users other than the
- user creating the AMI has permissions to launch it.
-
-- `ami_virtualization_type` (string) - The type of virtualization for the AMI
- you are building. This option must match the supported virtualization type
- of `source_ami`. Can be `paravirtual` or `hvm`.
-
-- `associate_public_ip_address` (boolean) - If using a non-default VPC,
- public IP addresses are not provided by default. If this is `true`, your
- new instance will get a Public IP. default: `false`
-
-- `availability_zone` (string) - Destination availability zone to launch
- instance in. Leave this empty to allow Amazon to auto-assign.
-
-- `block_duration_minutes` (int64) - Requires `spot_price` to be set. The
- required duration for the Spot Instances (also known as Spot blocks). This
- value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
- specify an Availability Zone group or a launch group if you specify a
- duration.
-
-- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
- provider whose API is compatible with aws EC2. Specify another endpoint
- like this `https://ec2.custom.endpoint.com`.
-
-- `decode_authorization_messages` (boolean) - Enable automatic decoding of
- any encoded authorization (error) messages using the
- `sts:DecodeAuthorizationMessage` API. Note: requires that the effective
- user/role have permissions to `sts:DecodeAuthorizationMessage` on resource
- `*`. Default `false`.
-
-- `disable_stop_instance` (boolean) - Packer normally stops the build
- instance after all provisioners have run. For Windows instances, it is
- sometimes desirable to [run
- Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
- which will stop the instance for you. If this is set to `true`, Packer
- *will not* stop the instance but will assume that you will send the stop
- signal yourself through your final provisioner. You can do this with a
- [windows-shell
- provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
-
- Note that Packer will still wait for the instance to be stopped, and
- failing to send the stop signal yourself, when you have set this flag to
- `true`, will cause a timeout.
-
- Example of a valid shutdown command:
-
- ``` json
- {
- "type": "windows-shell",
- "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
- }
- ```
-
-- `ebs_optimized` (boolean) - Mark instance as [EBS
- Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- Default `false`.
-
-- `ena_support` (boolean) - Enable enhanced networking (ENA but not
- SriovNetSupport) on HVM-compatible AMIs. If set, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. If false, this will
- disable enhanced networking in the final AMI as opposed to passing the
- setting through unchanged from the source. Note: you must make sure
- enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
-
-- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
- instance to burst additional CPU beyond its available [CPU
- Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
- for as long as the demand exists. This is in contrast to the standard
- configuration that only allows an instance to consume up to its available
- CPU Credits. See the AWS documentation for [T2
- Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
- and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
- Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
- information. By default this option is disabled and Packer will set up a
- [T2
- Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
- instance instead.
-
- To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
- Additionally, T2 Unlimited cannot be used in conjunction with Spot
- Instances, e.g. when the `spot_price` option has been configured.
- Attempting to do so will cause an error.
-
- !> **Warning!** Additional costs may be incurred by enabling T2
- Unlimited - even for instances that would usually qualify for the
- [AWS Free Tier](https://aws.amazon.com/free/).
-
-- `encrypt_boot` (boolean) - Whether or not to encrypt the resulting AMI when
- copying a provisioned instance to an AMI. By default, Packer will keep the
- encryption setting to what it was in the source image. Setting `false` will
- result in an unencrypted image, and `true` will result in an encrypted one.
-
-- `force_delete_snapshot` (boolean) - Force Packer to delete snapshots associated with
- AMIs, which have been deregistered by `force_deregister`. Default `false`.
-
-- `force_deregister` (boolean) - Force Packer to first deregister an existing
- AMI if one with the same name already exists. Default `false`.
-
-<%= partial "partials/builders/aws-common-opional-fields" %>
-
-- `iam_instance_profile` (string) - The name of an [IAM instance
- profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
- to launch the EC2 instance with.
-
-- `insecure_skip_tls_verify` (boolean) - This allows skipping TLS
- verification of the AWS EC2 endpoint. The default is `false`.
-
-- `launch_block_device_mappings` (array of block device mappings) - Add one
- or more block devices before the Packer build starts. If you add instance
- store volumes or EBS volumes in addition to the root device volume, the
- created AMI will contain block device mapping information for those
- volumes. Amazon creates snapshots of the source instance's root volume and
- any other EBS volumes described here. When you launch an instance from this
- new AMI, the instance automatically launches with these additional volumes,
- and will restore them from snapshots taken from the source instance.
-
-- `mfa_code` (string) - The MFA
- [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- code. This should probably be a user variable since it changes all the
- time.
-
-- `profile` (string) - The profile to use in the shared credentials file for
- AWS. See Amazon's documentation on [specifying
- profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
- for more details.
-
-- `region_kms_key_ids` (map of strings) - a map of regions to copy the ami
- to, along with the custom kms key id (alias or arn) to use for encryption
- for that region. Keys must match the regions provided in `ami_regions`. If
- you just want to encrypt using a default ID, you can stick with
- `kms_key_id` and `ami_regions`. If you want a region to be encrypted with
- that region's default key ID, you can use an empty string `""` instead of a
- key id in this map. (e.g. `"us-east-1": ""`) However, you cannot use
- default key IDs if you are using this in conjunction with `snapshot_users`
- -- in that situation you must use custom keys. For valid formats see
- *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
-- `run_tags` (object of key/value strings) - Tags to apply to the instance
- that is *launched* to create the AMI. These tags are *not* applied to the
- resulting AMI unless they're duplicated in `tags`. This is a [template
- engine](../templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `run_volume_tags` (object of key/value strings) - Tags to apply to the
- volumes that are *launched* to create the AMI. These tags are *not* applied
- to the resulting AMI unless they're duplicated in `tags`. This is a
- [template engine](../templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `security_group_id` (string) - The ID (*not* the name) of the security
- group to assign to the instance. By default this is not set and Packer will
- automatically create a new temporary security group to allow SSH access.
- Note that if this is specified, you must be sure the security group allows
- access to the `ssh_port` given below.
-
-- `security_group_ids` (array of strings) - A list of security groups as
- described above. Note that if this is specified, you must omit the
- `security_group_id`.
-
-- `security_group_filter` (object) - Filters used to populate the
- `security_group_ids` field. Example:
-
- ``` json
- {
- "security_group_filter": {
- "filters": {
- "tag:Class": "packer"
- }
- }
- }
- ```
-
- This selects the SG's with tag `Class` with the value `packer`.
-
- - `filters` (map of strings) - filters used to select a
- `security_group_ids`. Any filter described in the docs for
- [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
- is valid.
-
- `security_group_ids` take precedence over this.
-
-- `shutdown_behavior` (string) - Automatically terminate instances on
- shutdown in case Packer exits ungracefully. Possible values are "stop" and
- "terminate", default is `stop`.
-
-- `skip_region_validation` (boolean) - Set to true if you want to skip
- validation of the region configuration option. Default `false`.
-
-- `snapshot_groups` (array of strings) - A list of groups that have access to
- create volumes from the snapshot(s). By default no groups have permission
- to create volumes from the snapshot(s). `all` will make the snapshot
- publicly accessible.
-
-- `snapshot_users` (array of strings) - A list of account IDs that have
- access to create volumes from the snapshot(s). By default no additional
- users other than the user creating the AMI has permissions to create
- volumes from the backing snapshot(s).
-
-- `snapshot_tags` (object of key/value strings) - Tags to apply to snapshot.
- They will override AMI tags if already applied to snapshot. This is a
- [template engine](../templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `source_ami_filter` (object) - Filters used to populate the `source_ami`
- field. Example:
-
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- }
- ```
-
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
-
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
-
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
- option is required for security reasons.
-
- - `most_recent` (boolean) - Selects the newest created image when true.
- This is most useful for selecting a daily distro build.
-
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
-
-- `spot_instance_types` (array of strings) - a list of acceptable instance
- types to run your build on. We will request a spot instance using the max
- price of `spot_price` and the allocation strategy of "lowest price".
- Your instance will be launched on an instance type of the lowest available
- price that you have in your list. This is used in place of instance_type.
- You may only set either spot_instance_types or instance_type, not both.
- This feature exists to help prevent situations where a Packer build fails
- because a particular availability zone does not have capacity for the
- specific instance_type requested in instance_type.
-
-- `spot_price` (string) - The maximum hourly price to pay for a spot instance
- to create the AMI. Spot instances are a type of instance that EC2 starts
- when the current spot price is less than the maximum price you specify.
- Spot price will be updated based on available spot instance capacity and
- current spot instance requests. It may save you some costs. You can set
- this to `auto` for Packer to automatically discover the best spot price or
- to "0" to use an on demand instance (default).
-
-- `spot_price_auto_product` (string) - Required if `spot_price` is set to
- `auto`. This tells Packer what sort of AMI you're launching to find the
- best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
- `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
- `Windows (Amazon VPC)`
-
-- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
- set. This tells Packer to apply tags to the spot request that is issued.
-
-- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
- not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
- to your AWS IAM policy. Note: you must make sure enhanced networking is
- enabled on your instance. See [Amazon's documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
- Default `false`.
-
-- `ssh_keypair_name` (string) - If specified, this is the key that will be
- used for SSH with the machine. The key must match a key pair name loaded up
- into Amazon EC2. By default, this is blank, and Packer will generate a
- temporary keypair unless
- [`ssh_password`](../templates/communicator.html#ssh_password) is used.
- [`ssh_private_key_file`](../templates/communicator.html#ssh_private_key_file)
- or `ssh_agent_auth` must be specified when `ssh_keypair_name` is utilized.
-
-- `ssh_agent_auth` (boolean) - If true, the local SSH agent will be used to
- authenticate connections to the source instance. No temporary keypair will
- be created, and the values of `ssh_password` and `ssh_private_key_file`
- will be ignored. To use this option with a key pair already configured in
- the source AMI, leave the `ssh_keypair_name` blank. To associate an
- existing key pair in AWS with the source instance, set the
- `ssh_keypair_name` field to the name of the key pair.
-
-- `ssh_private_ip` (boolean) - No longer supported. See
- [`ssh_interface`](#ssh_interface). A fixer exists to migrate.
-
-- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns`,
- or `private_dns`. If set, either the public IP address, private IP address,
- public DNS name or private DNS name will used as the host for SSH. The
- default behaviour if inside a VPC is to use the public IP address if
- available, otherwise the private IP address will be used. If not in a VPC
- the public DNS name will be used. Also works for WinRM.
-
- Where Packer is configured for an outbound proxy but WinRM traffic should
- be direct, `ssh_interface` must be set to `private_dns` and
- `.compute.internal` included in the `NO_PROXY` environment
- variable.
-
-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
- `subnet-12345def`, where Packer will launch the EC2 instance. This field is
- required if you are using an non-default VPC.
-
-- `subnet_filter` (object) - Filters used to populate the `subnet_id` field.
- Example:
-
- ``` json
- {
- "subnet_filter": {
- "filters": {
- "tag:Class": "build"
- },
- "most_free": true,
- "random": false
- }
- }
- ```
-
- This selects the Subnet with tag `Class` with the value `build`, which has
- the most free IP addresses. NOTE: This will fail unless *exactly* one
- Subnet is returned. By using `most_free` or `random` one will be selected
- from those matching the filter.
-
- - `filters` (map of strings) - filters used to select a `subnet_id`.
- NOTE: This will fail unless *exactly* one Subnet is returned. Any
- filter described in the docs for
- [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
- is valid.
-
- - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
- will be used if multiple Subnets matches the filter.
-
- - `random` (boolean) - A random Subnet will be used if multiple Subnets
- matches the filter. `most_free` have precendence over this.
-
- `subnet_id` take precedence over this.
-
-- `tags` (object of key/value strings) - Tags applied to the AMI and relevant
- snapshots. This is a [template engine](../templates/engine.html), see
- [Build template data](#build-template-data) for more information.
-
-- `temporary_key_pair_name` (string) - The name of the temporary key pair to
- generate. By default, Packer generates a name that looks like
- `packer_`, where <UUID> is a 36 character unique identifier.
-
-- `temporary_security_group_source_cidrs` (list of string) - A list of IPv4
- CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
-
- The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only used when `security_group_id` or `security_group_ids` is not specified.
-
-- `token` (string) - The access token to use. This is different from the
- access key and secret key. If you're not sure what this is, then you
- probably don't need it. This will also be read from the `AWS_SESSION_TOKEN`
- environmental variable.
-
-- `user_data` (string) - User data to apply when launching the instance. Note
- that you need to be careful about escaping characters due to the templates
- being JSON. It is often more convenient to use `user_data_file`, instead.
- Packer will not automatically wait for a user script to finish before
- shutting down the instance this must be handled in a provisioner.
-
-- `user_data_file` (string) - Path to a file that will be used for the user
- data when launching the instance.
-
-- `vault_aws_engine` (object) - Get credentials from Hashicorp Vault's aws
- secrets engine. You must already have created a role to use. For more
- information about generating credentials via the Vault engine, see the
- [Vault
- docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
- If you set this flag, you must also set the below options:
- - `name` (string) - Required. Specifies the name of the role to generate
- credentials against. This is part of the request URL.
- - `engine_name` (string) - The name of the aws secrets engine. In the
- Vault docs, this is normally referred to as "aws", and Packer will
- default to "aws" if `engine_name` is not set.
- - `role_arn` (string)- The ARN of the role to assume if credential\_type
- on the Vault role is assumed\_role. Must match one of the allowed role
- ARNs in the Vault role. Optional if the Vault role only allows a single
- AWS role ARN; required otherwise.
- - `ttl` (string) - Specifies the TTL for the use of the STS token. This
- is specified as a string with a duration suffix. Valid only when
- credential\_type is assumed\_role or federation\_token. When not
- specified, the default\_sts\_ttl set for the role will be used. If that
- is also not set, then the default value of 3600s will be used. AWS
- places limits on the maximum TTL allowed. See the AWS documentation on
- the DurationSeconds parameter for AssumeRole (for assumed\_role
- credential types) and GetFederationToken (for federation\_token
- credential types) for more details.
-
- ``` json
- {
- "vault_aws_engine": {
- "name": "myrole",
- "role_arn": "myarn",
- "ttl": "3600s"
- }
- }
- ```
-
-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
- in order to create a temporary security group within the VPC. Requires
- `subnet_id` to be set. If this field is left blank, Packer will try to get
- the VPC ID from the `subnet_id`.
-
-
-- `windows_password_timeout` (string) - The timeout for waiting for a Windows
- password for Windows instances. Defaults to 20 minutes. Example value:
- `10m`
+<%= partial "partials/builder/amazon/common/AMIBlockDevices-not-required" %>
+<%= partial "partials/builder/amazon/common/AccessConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/LaunchBlockDevices-not-required" %>
+<%= partial "partials/helper/communicator/Config-not-required" %>
## Basic Example
diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
index 969c1e5c3..4ffbf00e0 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md.erb
@@ -35,488 +35,18 @@ builder.
### Required:
-- `access_key` (string) - The access key used to communicate with AWS. [Learn
- how to set this](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `instance_type` (string) - The EC2 instance type to use while building the
- AMI, such as `m1.small`.
-
-- `region` (string) - The name of the region, such as `us-east-1`, in which
- to launch the EC2 instance to create the AMI.
-
-- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
- how to set this](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `source_ami` (string) - The initial AMI used as a base for the newly
- created machine. `source_ami_filter` may be used instead to populate this
- automatically.
-
-- `ami_root_device` (block device mapping) - A block device mapping
- describing the root device of the AMI. This looks like the mappings in
- `ami_block_device_mapping`, except with an additional field:
-
- - `source_device_name` (string) - The device name of the block device on
- the source instance to be used as the root device for the AMI. This
- must correspond to a block device in `launch_block_device_mapping`.
+<%= partial "partials/builder/amazon/common/AccessConfig-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-required" %>
+<%= partial "partials/builder/amazon/ebssurrogate/Config-required" %>
### Optional:
-- `ami_architecture` (string) - what architecture to use when registering the
- final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
-
-- `ami_block_device_mappings` (array of block device mappings) - Add one or
- more [block device
- mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
- to the AMI. These will be attached when booting a new instance from your
- AMI. To add a block device during the packer build see
- `launch_block_device_mappings` below. Your options here may vary depending
- on the type of VM you use. The block device mappings allow for the
- following configuration:
-
- <%= partial "partials/builders/aws-common-block-device-a-i" %>
-
- <%= partial "partials/builders/aws-common-block-device-i-v" %>
-
-- `ami_description` (string) - The description to set for the resulting
- AMI(s). By default this description is empty. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `ami_groups` (array of strings) - A list of groups that have access to
- launch the resulting AMI(s). By default no groups have permission to launch
- the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
- accept any value other than `all`.
-
-- `ami_product_codes` (array of strings) - A list of product codes to
- associate with the AMI. By default no product codes are associated with the
- AMI.
-
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
- Tags and attributes are copied along with the AMI. AMI copying takes time
- depending on the size of the AMI, but will generally take many minutes.
-
-- `ami_users` (array of strings) - A list of account IDs that have access to
- launch the resulting AMI(s). By default no additional users other than the
- user creating the AMI has permissions to launch it.
-
-- `ami_virtualization_type` (string) - The type of virtualization for the AMI
- you are building. This option must match the supported virtualization type
- of `source_ami`. Can be `paravirtual` or `hvm`.
-
-- `associate_public_ip_address` (boolean) - If using a non-default VPC,
- public IP addresses are not provided by default. If this is `true`, your
- new instance will get a Public IP. default: `false`
-
-- `availability_zone` (string) - Destination availability zone to launch
- instance in. Leave this empty to allow Amazon to auto-assign.
-
-- `block_duration_minutes` (int64) - Requires `spot_price` to be set. The
- required duration for the Spot Instances (also known as Spot blocks). This
- value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
- specify an Availability Zone group or a launch group if you specify a
- duration.
-
-- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
- provider whose API is compatible with aws EC2. Specify another endpoint
- like this `https://ec2.custom.endpoint.com`.
-
-- `decode_authorization_messages` (boolean) - Enable automatic decoding of
- any encoded authorization (error) messages using the
- `sts:DecodeAuthorizationMessage` API. Note: requires that the effective
- user/role have permissions to `sts:DecodeAuthorizationMessage` on resource
- `*`. Default `false`.
-
-- `disable_stop_instance` (boolean) - Packer normally stops the build
- instance after all provisioners have run. For Windows instances, it is
- sometimes desirable to [run
- Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
- which will stop the instance for you. If this is set to true, Packer *will
- not* stop the instance but will assume that you will send the stop signal
- yourself through your final provisioner. You can do this with a
- [windows-shell
- provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
-
- Note that Packer will still wait for the instance to be stopped, and
- failing to send the stop signal yourself, when you have set this flag to
- `true`, will cause a timeout.
-
- Example of a valid shutdown command:
-
- ``` json
- {
- "type": "windows-shell",
- "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
- }
- ```
-
-- `ebs_optimized` (boolean) - Mark instance as [EBS
- Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- Default `false`.
-
-- `ena_support` (boolean) - Enable enhanced networking (ENA but not
- SriovNetSupport) on HVM-compatible AMIs. If set, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. If false, this will
- disable enhanced networking in the final AMI as opposed to passing the
- setting through unchanged from the source. Note: you must make sure
- enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
-
-- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
- instance to burst additional CPU beyond its available [CPU
- Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
- for as long as the demand exists. This is in contrast to the standard
- configuration that only allows an instance to consume up to its available
- CPU Credits. See the AWS documentation for [T2
- Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
- and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
- Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
- information. By default this option is disabled and Packer will set up a
- [T2
- Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
- instance instead.
-
- To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
- Additionally, T2 Unlimited cannot be used in conjunction with Spot
- Instances, e.g. when the `spot_price` option has been configured.
- Attempting to do so will cause an error.
-
- !> **Warning!** Additional costs may be incurred by enabling T2
- Unlimited - even for instances that would usually qualify for the [AWS Free
- Tier](https://aws.amazon.com/free/).
-
-- `encrypt_boot` (boolean) - Whether or not to encrypt the resulting AMI when
- copying a provisioned instance to an AMI. By default, Packer will keep the
- encryption setting to what it was in the source image. Setting `false` will
- result in an unencrypted image, and `true` will result in an encrypted one.
-
-- `force_deregister` (boolean) - Force Packer to first deregister an existing
- AMI if one with the same name already exists. Default `false`.
-
-- `force_delete_snapshot` (boolean) - Force Packer to delete snapshots
- associated with AMIs, which have been deregistered by `force_deregister`.
- Default `false`.
-
-<%= partial "partials/builders/aws-common-opional-fields" %>
-
-- `iam_instance_profile` (string) - The name of an [IAM instance
- profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
- to launch the EC2 instance with.
-
-- `insecure_skip_tls_verify` (boolean) - This allows skipping TLS
- verification of the AWS EC2 endpoint. The default is `false`.
-
-- `launch_block_device_mappings` (array of block device mappings) - Add one
- or more block devices before the Packer build starts. If you add instance
- store volumes or EBS volumes in addition to the root device volume, the
- created AMI will contain block device mapping information for those
- volumes. Amazon creates snapshots of the source instance's root volume and
- any other EBS volumes described here. When you launch an instance from this
- new AMI, the instance automatically launches with these additional volumes,
- and will restore them from snapshots taken from the source instance.
-
- In addition to the fields available in ami_block_device_mappings, you may
- optionally use the following field:
- - `omit_from_artifact` (boolean) - If true, this block device will not
- be snapshotted and the created AMI will not contain block device mapping
- information for this volume. If false, the block device will be mapped
- into the final created AMI. Set this option to true if you need a block
- device mounted in the surrogate AMI but not in the final created AMI.
-
-- `mfa_code` (string) - The MFA
- [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- code. This should probably be a user variable since it changes all the
- time.
-
-- `profile` (string) - The profile to use in the shared credentials file for
- AWS. See Amazon's documentation on [specifying
- profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
- for more details.
-
-- `region_kms_key_ids` (map of strings) - a map of regions to copy the ami
- to, along with the custom kms key id (alias or arn) to use for encryption
- for that region. Keys must match the regions provided in `ami_regions`. If
- you just want to encrypt using a default ID, you can stick with
- `kms_key_id` and `ami_regions`. If you want a region to be encrypted with
- that region's default key ID, you can use an empty string `""` instead of a
- key id in this map. (e.g. `"us-east-1": ""`) However, you cannot use
- default key IDs if you are using this in conjunction with `snapshot_users`
- -- in that situation you must use custom keys. For valid formats see
- *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
-- `run_tags` (object of key/value strings) - Tags to apply to the instance
- that is *launched* to create the AMI. These tags are *not* applied to the
- resulting AMI unless they're duplicated in `tags`. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `run_volume_tags` (object of key/value strings) - Tags to apply to the
- volumes that are *launched* to create the AMI. These tags are *not* applied
- to the resulting AMI unless they're duplicated in `tags`. This is a
- [template engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `security_group_id` (string) - The ID (*not* the name) of the security
- group to assign to the instance. By default this is not set and Packer will
- automatically create a new temporary security group to allow SSH access.
- Note that if this is specified, you must be sure the security group allows
- access to the `ssh_port` given below.
-
-- `security_group_ids` (array of strings) - A list of security groups as
- described above. Note that if this is specified, you must omit the
- `security_group_id`.
-
-- `security_group_filter` (object) - Filters used to populate the
- `security_group_ids` field. Example:
-
- ``` json
- {
- "security_group_filter": {
- "filters": {
- "tag:Class": "packer"
- }
- }
- }
- ```
-
- This selects the SG's with tag `Class` with the value `packer`.
-
- - `filters` (map of strings) - filters used to select a
- `security_group_ids`. Any filter described in the docs for
- [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
- is valid.
-
- `security_group_ids` take precedence over this.
-
-- `shutdown_behavior` (string) - Automatically terminate instances on
- shutdown incase packer exits ungracefully. Possible values are "stop" and
- "terminate", default is `stop`.
-
-- `skip_region_validation` (boolean) - Set to true if you want to skip
- validation of the region configuration option. Default `false`.
-
-- `snapshot_groups` (array of strings) - A list of groups that have access to
- create volumes from the snapshot(s). By default no groups have permission
- to create volumes from the snapshot(s). `all` will make the snapshot
- publicly accessible.
-
-- `snapshot_users` (array of strings) - A list of account IDs that have
- access to create volumes from the snapshot(s). By default no additional
- users other than the user creating the AMI has permissions to create
- volumes from the backing snapshot(s).
-
-- `snapshot_tags` (object of key/value strings) - Tags to apply to snapshot.
- They will override AMI tags if already applied to snapshot. This is a
- [template engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `source_ami_filter` (object) - Filters used to populate the `source_ami`
- field. Example:
-
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- }
- ```
-
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
-
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
-
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, `self` (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
- option is required for security reasons.
-
- - `most_recent` (boolean) - Selects the newest created image when true.
- This is most useful for selecting a daily distro build.
-
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
-
-- `spot_instance_types` (array of strings) - a list of acceptable instance
- types to run your build on. We will request a spot instance using the max
- price of `spot_price` and the allocation strategy of "lowest price".
- Your instance will be launched on an instance type of the lowest available
- price that you have in your list. This is used in place of instance_type.
- You may only set either spot_instance_types or instance_type, not both.
- This feature exists to help prevent situations where a Packer build fails
- because a particular availability zone does not have capacity for the
- specific instance_type requested in instance_type.
-
-- `spot_price` (string) - The maximum hourly price to pay for a spot instance
- to create the AMI. Spot instances are a type of instance that EC2 starts
- when the current spot price is less than the maximum price you specify.
- Spot price will be updated based on available spot instance capacity and
- current spot instance requests. It may save you some costs. You can set
- this to `auto` for Packer to automatically discover the best spot price or
- to "0" to use an on demand instance (default).
-
-- `spot_price_auto_product` (string) - Required if `spot_price` is set to
- `auto`. This tells Packer what sort of AMI you're launching to find the
- best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
- `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
- `Windows (Amazon VPC)`
-
-- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
- set. This tells Packer to apply tags to the spot request that is issued.
-
-- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
- not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
- to your AWS IAM policy. Note: you must make sure enhanced networking is
- enabled on your instance. See [Amazon's documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
- Default `false`.
-
-- `ssh_keypair_name` (string) - If specified, this is the key that will be
- used for SSH with the machine. The key must match a key pair name loaded up
- into Amazon EC2. By default, this is blank, and Packer will generate a
- temporary keypair unless
- [`ssh_password`](/docs/templates/communicator.html#ssh_password) is used.
- [`ssh_private_key_file`](/docs/templates/communicator.html#ssh_private_key_file)
- or `ssh_agent_auth` must be specified when `ssh_keypair_name` is utilized.
-
-- `ssh_agent_auth` (boolean) - If true, the local SSH agent will be used to
- authenticate connections to the source instance. No temporary keypair will
- be created, and the values of `ssh_password` and `ssh_private_key_file`
- will be ignored. To use this option with a key pair already configured in
- the source AMI, leave the `ssh_keypair_name` blank. To associate an
- existing key pair in AWS with the source instance, set the
- `ssh_keypair_name` field to the name of the key pair.
-
-- `ssh_private_ip` (boolean) - No longer supported. See
- [`ssh_interface`](#ssh_interface). A fixer exists to migrate.
-
-- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns`
- or `private_dns`. If set, either the public IP address, private IP address,
- public DNS name or private DNS name will used as the host for SSH. The
- default behaviour if inside a VPC is to use the public IP address if
- available, otherwise the private IP address will be used. If not in a VPC
- the public DNS name will be used. Also works for WinRM.
-
- Where Packer is configured for an outbound proxy but WinRM traffic should
- be direct, `ssh_interface` must be set to `private_dns` and
- `.compute.internal` included in the `NO_PROXY` environment
- variable.
-
-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
- `subnet-12345def`, where Packer will launch the EC2 instance. This field is
- required if you are using an non-default VPC.
-
-- `subnet_filter` (object) - Filters used to populate the `subnet_id` field.
- Example:
-
- ``` json
- {
- "subnet_filter": {
- "filters": {
- "tag:Class": "build"
- },
- "most_free": true,
- "random": false
- }
- }
- ```
-
- This selects the Subnet with tag `Class` with the value `build`, which has
- the most free IP addresses. NOTE: This will fail unless *exactly* one
- Subnet is returned. By using `most_free` or `random` one will be selected
- from those matching the filter.
-
- - `filters` (map of strings) - filters used to select a `subnet_id`.
- NOTE: This will fail unless *exactly* one Subnet is returned. Any
- filter described in the docs for
- [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
- is valid.
-
- - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
- will be used if multiple Subnets matches the filter.
-
- - `random` (boolean) - A random Subnet will be used if multiple Subnets
- matches the filter. `most_free` have precendence over this.
-
- `subnet_id` take precedence over this.
-
-- `tags` (object of key/value strings) - Tags applied to the AMI and relevant
- snapshots. This is a [template engine](/docs/templates/engine.html), see
- [Build template data](#build-template-data) for more information.
-
-- `temporary_key_pair_name` (string) - The name of the temporary keypair to
- generate. By default, Packer generates a name with a UUID.
-
-- `temporary_security_group_source_cidrs` (list of string) - A list of IPv4
- CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
-
- The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only used when `security_group_id` or `security_group_ids` is not specified.
-
-- `token` (string) - The access token to use. This is different from the
- access key and secret key. If you're not sure what this is, then you
- probably don't need it. This will also be read from the `AWS_SESSION_TOKEN`
- environmental variable.
-
-- `user_data` (string) - User data to apply when launching the instance. Note
- that you need to be careful about escaping characters due to the templates
- being JSON. It is often more convenient to use `user_data_file`, instead.
- Packer will not automatically wait for a user script to finish before
- shutting down the instance this must be handled in a provisioner.
-
-- `user_data_file` (string) - Path to a file that will be used for the user
- data when launching the instance.
-
-- `vault_aws_engine` (object) - Get credentials from Hashicorp Vault's aws
- secrets engine. You must already have created a role to use. For more
- information about generating credentials via the Vault engine, see the
- [Vault
- docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
- If you set this flag, you must also set the below options:
-- `name` (string) - Required. Specifies the name of the role to generate
- credentials against. This is part of the request URL.
-- `engine_name` (string) - The name of the aws secrets engine. In the Vault
- docs, this is normally referred to as "aws", and Packer will default to
- "aws" if `engine_name` is not set.
-- `role_arn` (string)- The ARN of the role to assume if credential\_type on
- the Vault role is assumed\_role. Must match one of the allowed role ARNs in
- the Vault role. Optional if the Vault role only allows a single AWS role
- ARN; required otherwise.
-- `ttl` (string) - Specifies the TTL for the use of the STS token. This is
- specified as a string with a duration suffix. Valid only when
- credential\_type is assumed\_role or federation\_token. When not specified,
- the default\_sts\_ttl set for the role will be used. If that is also not
- set, then the default value of 3600s will be used. AWS places limits on the
- maximum TTL allowed. See the AWS documentation on the DurationSeconds
- parameter for AssumeRole (for assumed\_role credential types) and
- GetFederationToken (for federation\_token credential types) for more
- details.
-
- Example:
- `json { "vault_aws_engine": { "name": "myrole", "role_arn": "myarn", "ttl": "3600s" } }`
-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
- in order to create a temporary security group within the VPC. Requires
- `subnet_id` to be set. If this field is left blank, Packer will try to get
- the VPC ID from the `subnet_id`.
-
-- `windows_password_timeout` (string) - The timeout for waiting for a Windows
- password for Windows instances. Defaults to 20 minutes. Example value:
- `10m`
+<%= partial "partials/builder/amazon/ebssurrogate/Config-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIBlockDevices-not-required" %>
+<%= partial "partials/builder/amazon/common/AccessConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-not-required" %>
+<%= partial "partials/helper/communicator/Config-not-required" %>
## Basic Example
diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md.erb b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
index 43ac0cde3..7be033790 100644
--- a/website/source/docs/builders/amazon-ebsvolume.html.md.erb
+++ b/website/source/docs/builders/amazon-ebsvolume.html.md.erb
@@ -41,483 +41,16 @@ builder.
### Required:
-- `access_key` (string) - The access key used to communicate with AWS. [Learn
- how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `instance_type` (string) - The EC2 instance type to use while building the
- AMI, such as `m1.small`.
-
-- `region` (string) - The name of the region, such as `us-east-1`, in which
- to launch the EC2 instance to create the AMI.
-
-- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
- how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `source_ami` (string) - The initial AMI used as a base for the newly
- created machine. `source_ami_filter` may be used instead to populate this
- automatically.
+<%= partial "partials/builder/amazon/common/AccessConfig-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-required" %>
### Optional:
-- `spot_instance_types` (array of strings) - a list of acceptable instance
- types to run your build on. We will request a spot instance using the max
- price of `spot_price` and the allocation strategy of "lowest price".
- Your instance will be launched on an instance type of the lowest available
- price that you have in your list. This is used in place of instance_type.
- You may only set either spot_instance_types or instance_type, not both.
- This feature exists to help prevent situations where a Packer build fails
- because a particular availability zone does not have capacity for the
- specific instance_type requested in instance_type.
-
-- `spot_price` (string) - The maximum hourly price to pay for a spot instance
- to create the AMI. Spot instances are a type of instance that EC2 starts
- when the current spot price is less than the maximum price you specify.
- Spot price will be updated based on available spot instance capacity and
- current spot instance requests. It may save you some costs. You can set
- this to `auto` for Packer to automatically discover the best spot price or
- to "0" to use an on demand instance (default).
-
-- `spot_price_auto_product` (string) - Required if `spot_price` is set to
- `auto`. This tells Packer what sort of AMI you're launching to find the
- best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
- `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
- `Windows (Amazon VPC)`
-
-- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
- set. This tells Packer to apply tags to the spot request that is issued.
-
-- `ebs_volumes` (array of block device mappings) - Add the block device
- mappings to the AMI. The block device mappings allow for keys:
-
- - `device_name` (string) - The device name exposed to the instance (for
- example, `/dev/sdh` or `xvdh`). Required for every device in the block
- device mapping.
-
- - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
- deleted on instance termination.
-
- - `encrypted` (boolean) - Indicates whether or not to encrypt the volume.
- By default, Packer will keep the encryption setting to what it was in
- the source image. Setting `false` will result in an unencrypted device,
- and `true` will result in an encrypted one.
-
- - `kms_key_id` (string) - The ARN for the KMS encryption key. When
- specifying `kms_key_id`, `encrypted` needs to be set to `true`. For
- valid formats see *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
- - `iops` (number) - The number of I/O operations per second (IOPS) that
- the volume supports. See the documentation on
- [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
- for more information
-
- - `no_device` (boolean) - Suppresses the specified device included in the
- block device mapping of the AMI
-
- - `snapshot_id` (string) - The ID of the snapshot
-
- - `virtual_name` (string) - The virtual device name. See the
- documentation on [Block Device
- Mapping](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
- for more information
-
- - `volume_size` (number) - The size of the volume, in GiB. Required if
- not specifying a `snapshot_id`
-
- - `volume_type` (string) - The volume type. `gp2` for General Purpose
- (SSD) volumes, `io1` for Provisioned IOPS (SSD) volumes, and `standard`
- for Magnetic volumes
-
- - `tags` (map) - Tags to apply to the volume. These are retained after
- the builder completes. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `associate_public_ip_address` (boolean) - If using a non-default VPC,
- public IP addresses are not provided by default. If this is `true`, your
- new instance will get a Public IP. default: `false`
-
-- `availability_zone` (string) - Destination availability zone to launch
- instance in. Leave this empty to allow Amazon to auto-assign.
-
-- `block_duration_minutes` (int64) - Requires `spot_price` to be set. The
- required duration for the Spot Instances (also known as Spot blocks). This
- value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
- specify an Availability Zone group or a launch group if you specify a
- duration.
-
-- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
- provider whose API is compatible with aws EC2. Specify another endpoint
- like this `https://ec2.custom.endpoint.com`.
-
-- `disable_stop_instance` (boolean) - Packer normally stops the build
- instance after all provisioners have run. For Windows instances, it is
- sometimes desirable to [run
- Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html)
- which will stop the instance for you. If this is set to true, Packer *will
- not* stop the instance but will assume that you will send the stop signal
- yourself through your final provisioner. You can do this with a
- [windows-shell
- provisioner](https://www.packer.io/docs/provisioners/windows-shell.html).
-
- Note that Packer will still wait for the instance to be stopped, and
- failing to send the stop signal yourself, when you have set this flag to
- `true`, will cause a timeout.
-
- Example of a valid shutdown command:
-
- ``` json
- {
- "type": "windows-shell",
- "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"]
- }
- ```
-
-- `decode_authorization_messages` (boolean) - Enable automatic decoding of
- any encoded authorization (error) messages using the
- `sts:DecodeAuthorizationMessage` API. Note: requires that the effective
- user/role have permissions to `sts:DecodeAuthorizationMessage` on resource
- `*`. Default `false`.
-
-- `ebs_optimized` (boolean) - Mark instance as [EBS
- Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- Default `false`.
-
-- `ena_support` (boolean) - Enable enhanced networking (ENA but not
- SriovNetSupport) on HVM-compatible AMIs. If set, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. If false, this will
- disable enhanced networking in the final AMI as opposed to passing the
- setting through unchanged from the source. Note: you must make sure
- enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
-
-- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
- instance to burst additional CPU beyond its available [CPU
- Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
- for as long as the demand exists. This is in contrast to the standard
- configuration that only allows an instance to consume up to its available
- CPU Credits. See the AWS documentation for [T2
- Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
- and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
- Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
- information. By default this option is disabled and Packer will set up a
- [T2
- Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
- instance instead.
-
- To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
- Additionally, T2 Unlimited cannot be used in conjunction with Spot
- Instances, e.g. when the `spot_price` option has been configured.
- Attempting to do so will cause an error.
-
- !> **Warning!** Additional costs may be incurred by enabling T2
- Unlimited - even for instances that would usually qualify for the [AWS Free
- Tier](https://aws.amazon.com/free/).
-
-- `iam_instance_profile` (string) - The name of an [IAM instance
- profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
- to launch the EC2 instance with.
-
-- `insecure_skip_tls_verify` (boolean) - This allows skipping TLS
- verification of the AWS EC2 endpoint. The default is `false`.
-
-- `mfa_code` (string) - The MFA
- [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- code. This should probably be a user variable since it changes all the
- time.
-
-- `profile` (string) - The profile to use in the shared credentials file for
- AWS. See Amazon's documentation on [specifying
- profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
- for more details.
-
-- `run_tags` (object of key/value strings) - Tags to apply to the instance
- that is *launched* to create the AMI. These tags are *not* applied to the
- resulting AMI unless they're duplicated in `tags`. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `security_group_id` (string) - The ID (*not* the name) of the security
- group to assign to the instance. By default this is not set and Packer will
- automatically create a new temporary security group to allow SSH access.
- Note that if this is specified, you must be sure the security group allows
- access to the `ssh_port` given below.
-
-- `security_group_ids` (array of strings) - A list of security groups as
- described above. Note that if this is specified, you must omit the
- `security_group_id`.
-
-- `security_group_filter` (object) - Filters used to populate the
- `security_group_ids` field. Example:
-
- ``` json
- {
- "security_group_filter": {
- "filters": {
- "tag:Class": "packer"
- }
- }
- }
- ```
-
- This selects the SG's with tag `Class` with the value `packer`.
-
- - `filters` (map of strings) - filters used to select a
- `security_group_ids`. Any filter described in the docs for
- [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
- is valid.
-
- `security_group_ids` take precedence over this.
-
-- `shutdown_behavior` (string) - Automatically terminate instances on
- shutdown in case Packer exits ungracefully. Possible values are `stop` and
- `terminate`. Defaults to `stop`.
-
-- `skip_region_validation` (boolean) - Set to `true` if you want to skip
- validation of the region configuration option. Defaults to `false`.
-
-- `snapshot_groups` (array of strings) - A list of groups that have access to
- create volumes from the snapshot(s). By default no groups have permission
- to create volumes from the snapshot(s). `all` will make the snapshot
- publicly accessible.
-
-- `snapshot_users` (array of strings) - A list of account IDs that have
- access to create volumes from the snapshot(s). By default no additional
- users other than the user creating the AMI has permissions to create
- volumes from the backing snapshot(s).
-
-- `source_ami_filter` (object) - Filters used to populate the `source_ami`
- field. Example:
-
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- }
- ```
-
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
-
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
-
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, "amazon", "aws-marketplace", or "microsoft". This
- option is required for security reasons.
-
- - `most_recent` (boolean) - Selects the newest created image when true.
- This is most useful for selecting a daily distro build.
-
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
-
-- `spot_instance_types` (array of strings) - a list of acceptable instance
- types to run your build on. We will request a spot instance using the max
- price of `spot_price` and the allocation strategy of "lowest price".
- Your instance will be launched on an instance type of the lowest available
- price that you have in your list. This is used in place of instance_type.
- You may only set either spot_instance_types or instance_type, not both.
- This feature exists to help prevent situations where a Packer build fails
- because a particular availability zone does not have capacity for the
- specific instance_type requested in instance_type.
-
-- `spot_price` (string) - The maximum hourly price to pay for a spot instance
- to create the AMI. Spot instances are a type of instance that EC2 starts
- when the current spot price is less than the maximum price you specify.
- Spot price will be updated based on available spot instance capacity and
- current spot instance requests. It may save you some costs. You can set
- this to `auto` for Packer to automatically discover the best spot price or
- to "0" to use an on demand instance (default).
-
-- `spot_price_auto_product` (string) - Required if `spot_price` is set to
- `auto`. This tells Packer what sort of AMI you're launching to find the
- best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
- `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
- `Windows (Amazon VPC)`
-
-- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
- set. This tells Packer to apply tags to the spot request that is issued.
-
-- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
- not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
- to your AWS IAM policy. Note: you must make sure enhanced networking is
- enabled on your instance. See [Amazon's documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
- Default `false`.
-
-- `ssh_keypair_name` (string) - If specified, this is the key that will be
- used for SSH with the machine. By default, this is blank, and Packer will
- generate a temporary key pair unless
- [`ssh_password`](/docs/templates/communicator.html#ssh_password) is used.
- [`ssh_private_key_file`](/docs/templates/communicator.html#ssh_private_key_file)
- must be specified with this.
-
-- `ssh_private_ip` (boolean) - No longer supported. See
- [`ssh_interface`](#ssh_interface). A fixer exists to migrate.
-
-- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns`
- or `private_dns`. If set, either the public IP address, private IP address,
-  public DNS name or private DNS name will be used as the host for SSH. The
- default behaviour if inside a VPC is to use the public IP address if
- available, otherwise the private IP address will be used. If not in a VPC
- the public DNS name will be used. Also works for WinRM.
-
- Where Packer is configured for an outbound proxy but WinRM traffic should
- be direct, `ssh_interface` must be set to `private_dns` and
- `.compute.internal` included in the `NO_PROXY` environment
- variable.
-
-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
- `subnet-12345def`, where Packer will launch the EC2 instance. This field is
-  required if you are using a non-default VPC.
-
-- `subnet_filter` (object) - Filters used to populate the `subnet_id` field.
- Example:
-
- ``` json
- {
- "subnet_filter": {
- "filters": {
- "tag:Class": "build"
- },
- "most_free": true,
- "random": false
- }
- }
- ```
-
- This selects the Subnet with tag `Class` with the value `build`, which has
- the most free IP addresses. NOTE: This will fail unless *exactly* one
- Subnet is returned. By using `most_free` or `random` one will be selected
- from those matching the filter.
-
- - `filters` (map of strings) - filters used to select a `subnet_id`.
- NOTE: This will fail unless *exactly* one Subnet is returned. Any
- filter described in the docs for
- [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
- is valid.
-
- - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
-    will be used if multiple Subnets match the filter.
-
-  - `random` (boolean) - A random Subnet will be used if multiple Subnets
-    match the filter. `most_free` has precedence over this.
-
- `subnet_id` take precedence over this.
-
-- `temporary_key_pair_name` (string) - The name of the temporary key pair to
-  generate. By default, Packer generates a name that looks like
-  `packer_<UUID>`, where `<UUID>` is a 36 character unique identifier.
-
-- `temporary_security_group_source_cidrs` (list of string) - A list of IPv4
- CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
-
- The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only used when `security_group_id` or `security_group_ids` is not specified.
-
-- `token` (string) - The access token to use. This is different from the
- access key and secret key. If you're not sure what this is, then you
- probably don't need it. This will also be read from the `AWS_SESSION_TOKEN`
- environmental variable.
-
-- `user_data` (string) - User data to apply when launching the instance. Note
- that you need to be careful about escaping characters due to the templates
- being JSON. It is often more convenient to use `user_data_file`, instead.
- Packer will not automatically wait for a user script to finish before
- shutting down the instance this must be handled in a provisioner.
-
-- `user_data_file` (string) - Path to a file that will be used for the user
- data when launching the instance.
-
-- `vault_aws_engine` (object) - Get credentials from Hashicorp Vault's aws
- secrets engine. You must already have created a role to use. For more
- information about generating credentials via the Vault engine, see the
-  [Vault docs](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials).
-  If you set this
- flag, you must also set the below options:
- - `name` (string) - Required. Specifies the name of the role to generate
- credentials against. This is part of the request URL.
- - `engine_name` (string) - The name of the aws secrets engine. In the Vault
- docs, this is normally referred to as "aws", and Packer will default to
- "aws" if `engine_name` is not set.
- - `role_arn` (string)- The ARN of the role to assume if credential_type on
- the Vault role is assumed_role. Must match one of the allowed role ARNs
- in the Vault role. Optional if the Vault role only allows a single AWS
- role ARN; required otherwise.
- - `ttl` (string) - Specifies the TTL for the use of the STS token. This is
- specified as a string with a duration suffix. Valid only when
- credential_type is assumed_role or federation_token. When not specified,
- the default_sts_ttl set for the role will be used. If that is also not
- set, then the default value of 3600s will be used. AWS places limits on
- the maximum TTL allowed. See the AWS documentation on the DurationSeconds
- parameter for AssumeRole (for assumed_role credential types) and
- GetFederationToken (for federation_token credential types) for more
- details.
-
- Example:
- ``` json
- {
- "vault_aws_engine": {
- "name": "myrole",
- "role_arn": "myarn",
- "ttl": "3600s"
- }
- }
- ```
-
-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
- in order to create a temporary security group within the VPC. Requires
- `subnet_id` to be set. If this field is left blank, Packer will try to get
- the VPC ID from the `subnet_id`.
-
-- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
- `vpc_id` take precedence over this.
- Example:
-
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
- }
- }
- }
- ```
-
- This selects the VPC with tag `Class` with the value `build`, which is not
-  the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
-
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
-
-
-- `windows_password_timeout` (string) - The timeout for waiting for a Windows
- password for Windows instances. Defaults to 20 minutes. Example value:
- `10m`
+<%= partial "partials/builder/amazon/common/AccessConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-not-required" %>
+<%= partial "partials/builder/amazon/ebsvolume/Config-not-required" %>
+<%= partial "partials/helper/communicator/Config-not-required" %>
## Basic Example
diff --git a/website/source/docs/builders/amazon-instance.html.md.erb b/website/source/docs/builders/amazon-instance.html.md.erb
index e19912e7c..62025b43f 100644
--- a/website/source/docs/builders/amazon-instance.html.md.erb
+++ b/website/source/docs/builders/amazon-instance.html.md.erb
@@ -53,489 +53,21 @@ builder.
### Required:
-- `access_key` (string) - The access key used to communicate with AWS. [Learn
- how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-- `account_id` (string) - Your AWS account ID. This is required for bundling
- the AMI. This is *not the same* as the access key. You can find your
- account ID in the security credentials page of your AWS account.
-
-- `ami_name` (string) - The name of the resulting AMI that will appear when
- managing AMIs in the AWS console or via APIs. This must be unique. To help
- make this unique, use a function like `timestamp` (see [configuration
- templates](/docs/templates/engine.html) for more info)
-
-- `instance_type` (string) - The EC2 instance type to use while building the
- AMI, such as `m1.small`.
-
-- `region` (string) - The name of the region, such as `us-east-1`, in which
- to launch the EC2 instance to create the AMI.
-
-- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This
- bucket will be created if it doesn't exist.
-
-- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
- how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `source_ami` (string) - The initial AMI used as a base for the newly
- created machine.
-
-- `x509_cert_path` (string) - The local path to a valid X509 certificate for
- your AWS account. This is used for bundling the AMI. This X509 certificate
- must be registered with your account from the security credentials page in
- the AWS console.
-
-- `x509_key_path` (string) - The local path to the private key for the X509
- certificate specified by `x509_cert_path`. This is used for bundling the
- AMI.
+<%= partial "partials/builder/amazon/common/AccessConfig-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-required" %>
+<%= partial "partials/builder/amazon/instance/Config-required" %>
### Optional:
-- `ami_block_device_mappings` (array of block device mappings) - Add one or
- more [block device
- mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
- to the AMI. These will be attached when booting a new instance from your
- AMI. To add a block device during the Packer build see
- `launch_block_device_mappings` below. Your options here may vary depending
- on the type of VM you use. The block device mappings allow for the
- following configuration:
-
- <%= partial "partials/builders/aws-common-block-device-a-i" %>
-
- <%= partial "partials/builders/aws-common-block-device-i-v" %>
-
-- `ami_description` (string) - The description to set for the resulting
- AMI(s). By default this description is empty. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `ami_groups` (array of strings) - A list of groups that have access to
- launch the resulting AMI(s). By default no groups have permission to launch
- the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
- accept any value other than `all`.
-
-- `ami_product_codes` (array of strings) - A list of product codes to
- associate with the AMI. By default no product codes are associated with the
- AMI.
-
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
- Tags and attributes are copied along with the AMI. AMI copying takes time
- depending on the size of the AMI, but will generally take many minutes.
-
-- `ami_users` (array of strings) - A list of account IDs that have access to
- launch the resulting AMI(s). By default no additional users other than the
- user creating the AMI has permissions to launch it.
-
-- `ami_virtualization_type` (string) - The type of virtualization for the AMI
- you are building. This option is required to register HVM images. Can be
- `paravirtual` (default) or `hvm`.
-
-- `associate_public_ip_address` (boolean) - If using a non-default VPC,
- public IP addresses are not provided by default. If this is `true`, your
- new instance will get a Public IP. default: `false`
-
-- `availability_zone` (string) - Destination availability zone to launch
- instance in. Leave this empty to allow Amazon to auto-assign.
-
-- `block_duration_minutes` (int64) - Requires `spot_price` to be set. The
- required duration for the Spot Instances (also known as Spot blocks). This
- value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
- specify an Availability Zone group or a launch group if you specify a
- duration.
-
-- `bundle_destination` (string) - The directory on the running instance where
- the bundled AMI will be saved prior to uploading. By default this is
- `/tmp`. This directory must exist and be writable.
-
-- `bundle_prefix` (string) - The prefix for files created from bundling the
- root volume. By default this is `image-{{timestamp}}`. The `timestamp`
- variable should be used to make sure this is unique, otherwise it can
- collide with other created AMIs by Packer in your account.
-
-- `bundle_upload_command` (string) - The command to use to upload the bundled
- volume. See the "custom bundle commands" section below for more
- information.
-
-- `bundle_vol_command` (string) - The command to use to bundle the volume.
- See the "custom bundle commands" section below for more information.
-
-- `custom_endpoint_ec2` (string) - This option is useful if you use a cloud
- provider whose API is compatible with aws EC2. Specify another endpoint
- like this `https://ec2.custom.endpoint.com`.
-
-- `decode_authorization_messages` (boolean) - Enable automatic decoding of
- any encoded authorization (error) messages using the
- `sts:DecodeAuthorizationMessage` API. Note: requires that the effective
- user/role have permissions to `sts:DecodeAuthorizationMessage` on resource
- `*`. Default `false`.
-
-- `ebs_optimized` (boolean) - Mark instance as [EBS
- Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- Default `false`.
-
-- `ena_support` (boolean) - Enable enhanced networking (ENA but not
- SriovNetSupport) on HVM-compatible AMIs. If set, add
- `ec2:ModifyInstanceAttribute` to your AWS IAM policy. If false, this will
- disable enhanced networking in the final AMI as opposed to passing the
- setting through unchanged from the source. Note: you must make sure
- enhanced networking is enabled on your instance. See [Amazon's
- documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
-
-- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
- instance to burst additional CPU beyond its available [CPU
- Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
- for as long as the demand exists. This is in contrast to the standard
- configuration that only allows an instance to consume up to its available
- CPU Credits. See the AWS documentation for [T2
- Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
- and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
- Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
- information. By default this option is disabled and Packer will set up a
- [T2
- Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
- instance instead.
-
- To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
- Additionally, T2 Unlimited cannot be used in conjunction with Spot
- Instances, e.g. when the `spot_price` option has been configured.
- Attempting to do so will cause an error.
-
- !> **Warning!** Additional costs may be incurred by enabling T2
- Unlimited - even for instances that would usually qualify for the [AWS Free
- Tier](https://aws.amazon.com/free/).
-
-- `force_deregister` (boolean) - Force Packer to first deregister an existing
- AMI if one with the same name already exists. Defaults to `false`.
-
-- `force_delete_snapshot` (boolean) - Force Packer to delete snapshots
- associated with AMIs, which have been deregistered by `force_deregister`.
- Defaults to `false`.
-
-- `iam_instance_profile` (string) - The name of an [IAM instance
- profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
- to launch the EC2 instance with.
-
-- `launch_block_device_mappings` (array of block device mappings) - Add one
- or more block devices before the Packer build starts. If you add instance
- store volumes or EBS volumes in addition to the root device volume, the
- created AMI will contain block device mapping information for those
- volumes. Amazon creates snapshots of the source instance's root volume and
- any other EBS volumes described here. When you launch an instance from this
- new AMI, the instance automatically launches with these additional volumes,
- and will restore them from snapshots taken from the source instance.
-
-- `mfa_code` (string) - The MFA
- [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- code. This should probably be a user variable since it changes all the
- time.
-
-- `profile` (string) - The profile to use in the shared credentials file for
- AWS. See Amazon's documentation on [specifying
- profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
- for more details.
-
-- `region_kms_key_ids` (map of strings) - a map of regions to copy the ami
- to, along with the custom kms key id (alias or arn) to use for encryption
- for that region. Keys must match the regions provided in `ami_regions`. If
- you just want to encrypt using a default ID, you can stick with
- `kms_key_id` and `ami_regions`. If you want a region to be encrypted with
- that region's default key ID, you can use an empty string `""` instead of a
- key id in this map. (e.g. `"us-east-1": ""`) However, you cannot use
- default key IDs if you are using this in conjunction with `snapshot_users`
- -- in that situation you must use custom keys. For valid formats see
- *KmsKeyId* in the [AWS API docs -
- CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
-
-- `run_tags` (object of key/value strings) - Tags to apply to the instance
- that is *launched* to create the AMI. These tags are *not* applied to the
- resulting AMI unless they're duplicated in `tags`. This is a [template
- engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `security_group_id` (string) - The ID (*not* the name) of the security
- group to assign to the instance. By default this is not set and Packer will
- automatically create a new temporary security group to allow SSH access.
- Note that if this is specified, you must be sure the security group allows
- access to the `ssh_port` given below.
-
-- `security_group_ids` (array of strings) - A list of security groups as
- described above. Note that if this is specified, you must omit the
- `security_group_id`.
-
-- `security_group_filter` (object) - Filters used to populate the
- `security_group_ids` field. Example:
-
- ``` json
- {
- "security_group_filter": {
- "filters": {
- "tag:Class": "packer"
- }
- }
- }
- ```
-
- This selects the SG's with tag `Class` with the value `packer`.
-
- - `filters` (map of strings) - filters used to select a
- `security_group_ids`. Any filter described in the docs for
- [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
- is valid.
-
- `security_group_ids` take precedence over this.
-
-- `skip_region_validation` (boolean) - Set to true if you want to skip
- validation of the region configuration option. Defaults to `false`.
-
-- `snapshot_groups` (array of strings) - A list of groups that have access to
- create volumes from the snapshot(s). By default no groups have permission
- to create volumes from the snapshot(s). `all` will make the snapshot
- publicly accessible.
-
-- `snapshot_users` (array of strings) - A list of account IDs that have
- access to create volumes from the snapshot(s). By default no additional
- users other than the user creating the AMI has permissions to create
- volumes from the backing snapshot(s).
-
-- `source_ami_filter` (object) - Filters used to populate the `source_ami`
- field. Example:
-
- ``` json
- {
- "source_ami_filter": {
- "filters": {
- "virtualization-type": "hvm",
- "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
- "root-device-type": "ebs"
- },
- "owners": ["099720109477"],
- "most_recent": true
- }
- }
- ```
-
- This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
- This will fail unless *exactly* one AMI is returned. In the above example,
- `most_recent` will cause this to succeed by selecting the newest image.
-
- - `filters` (map of strings) - filters used to select a `source_ami`.
- NOTE: This will fail unless *exactly* one AMI is returned. Any filter
- described in the docs for
- [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- is valid.
-
- - `owners` (array of strings) - Filters the images by their owner. You
- may specify one or more AWS account IDs, "self" (which will use the
- account whose credentials you are using to run Packer), or an AWS owner
- alias: for example, "amazon", "aws-marketplace", or "microsoft". This
- option is required for security reasons.
-
- - `most_recent` (boolean) - Selects the newest created image when true.
- This is most useful for selecting a daily distro build.
-
- You may set this in place of `source_ami` or in conjunction with it. If you
- set this in conjunction with `source_ami`, the `source_ami` will be added
- to the filter. The provided `source_ami` must meet all of the filtering
- criteria provided in `source_ami_filter`; this pins the AMI returned by the
- filter, but will cause Packer to fail if the `source_ami` does not exist.
-
-- `snapshot_tags` (object of key/value strings) - Tags to apply to snapshot.
- They will override AMI tags if already applied to snapshot.
-
-- `spot_instance_types` (array of strings) - a list of acceptable instance
- types to run your build on. We will request a spot instance using the max
- price of `spot_price` and the allocation strategy of "lowest price".
- Your instance will be launched on an instance type of the lowest available
- price that you have in your list. This is used in place of instance_type.
- You may only set either spot_instance_types or instance_type, not both.
- This feature exists to help prevent situations where a Packer build fails
- because a particular availability zone does not have capacity for the
- specific instance_type requested in instance_type.
-
-- `spot_price` (string) - The maximum hourly price to pay for a spot instance
- to create the AMI. Spot instances are a type of instance that EC2 starts
- when the current spot price is less than the maximum price you specify.
- Spot price will be updated based on available spot instance capacity and
- current spot instance requests. It may save you some costs. You can set
- this to `auto` for Packer to automatically discover the best spot price or
- to "0" to use an on demand instance (default).
-
-- `spot_price_auto_product` (string) - Required if `spot_price` is set to
- `auto`. This tells Packer what sort of AMI you're launching to find the
- best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`,
- `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`,
- `Windows (Amazon VPC)`
-
-- `spot_tags` (object of key/value strings) - Requires `spot_price` to be
- set. This tells Packer to apply tags to the spot request that is issued.
-
-- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but
- not ENA) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute`
- to your AWS IAM policy. Note: you must make sure enhanced networking is
- enabled on your instance. See [Amazon's documentation on enabling enhanced
- networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
- Default `false`.
-
-- `ssh_keypair_name` (string) - If specified, this is the key that will be
- used for SSH with the machine. The key must match a key pair name loaded up
- into Amazon EC2. By default, this is blank, and Packer will generate a
- temporary key pair unless
- [`ssh_password`](/docs/templates/communicator.html#ssh_password) is used.
- [`ssh_private_key_file`](/docs/templates/communicator.html#ssh_private_key_file)
- or `ssh_agent_auth` must be specified when `ssh_keypair_name` is utilized.
-
-- `ssh_agent_auth` (boolean) - If true, the local SSH agent will be used to
- authenticate connections to the source instance. No temporary key pair will
- be created, and the values of `ssh_password` and `ssh_private_key_file`
- will be ignored. To use this option with a key pair already configured in
- the source AMI, leave the `ssh_keypair_name` blank. To associate an
- existing key pair in AWS with the source instance, set the
- `ssh_keypair_name` field to the name of the key pair.
-
-- `ssh_private_ip` (boolean) - No longer supported. See
- [`ssh_interface`](#ssh_interface). A fixer exists to migrate.
-
-- `ssh_interface` (string) - One of `public_ip`, `private_ip`, `public_dns`
- or `private_dns`. If set, either the public IP address, private IP address,
- public DNS name or private DNS name will be used as the host for SSH. The
- default behaviour if inside a VPC is to use the public IP address if
- available, otherwise the private IP address will be used. If not in a VPC
- the public DNS name will be used. Also works for WinRM.
-
- Where Packer is configured for an outbound proxy but WinRM traffic should
- be direct, `ssh_interface` must be set to `private_dns` and
- `.compute.internal` included in the `NO_PROXY` environment
- variable.
-
-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
- `subnet-12345def`, where Packer will launch the EC2 instance. This field is
- required if you are using a non-default VPC.
-
-- `subnet_filter` (object) - Filters used to populate the `subnet_id` field.
- Example:
-
- ``` json
- {
- "subnet_filter": {
- "filters": {
- "tag:Class": "build"
- },
- "most_free": true,
- "random": false
- }
- }
- ```
-
- This selects the Subnet with tag `Class` with the value `build`, which has
- the most free IP addresses. NOTE: This will fail unless *exactly* one
- Subnet is returned. By using `most_free` or `random` one will be selected
- from those matching the filter.
-
- - `filters` (map of strings) - filters used to select a `subnet_id`.
- NOTE: This will fail unless *exactly* one Subnet is returned. Any
- filter described in the docs for
- [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
- is valid.
-
- - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
- will be used if multiple Subnets match the filter.
-
- - `random` (boolean) - A random Subnet will be used if multiple Subnets
- match the filter. `most_free` has precedence over this.
-
- `subnet_id` takes precedence over this.
-
-- `tags` (object of key/value strings) - Tags applied to the AMI. This is a
- [template engine](/docs/templates/engine.html), see [Build template
- data](#build-template-data) for more information.
-
-- `temporary_key_pair_name` (string) - The name of the temporary key pair to
- generate. By default, Packer generates a name that looks like
- `packer_<UUID>`, where `<UUID>` is a 36 character unique identifier.
-
-- `temporary_security_group_source_cidrs` (list of string) - A list of IPv4
- CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group.
-
- The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only used when `security_group_id` or `security_group_ids` is not specified.
-
-- `user_data` (string) - User data to apply when launching the instance. Note
- that you need to be careful about escaping characters due to the templates
- being JSON. It is often more convenient to use `user_data_file`, instead.
- Packer will not automatically wait for a user script to finish before
- shutting down the instance; this must be handled in a provisioner.
-
-- `user_data_file` (string) - Path to a file that will be used for the user
- data when launching the instance.
-
-- `vault_aws_engine` (object) - Get credentials from Hashicorp Vault's aws
- secrets engine. You must already have created a role to use. For more
- information about generating credentials via the Vault engine, see the
- [Vault
- docs.](https://www.vaultproject.io/api/secret/aws/index.html#generate-credentials)
- If you set this flag, you must also set the below options:
- - `name` (string) - Required. Specifies the name of the role to generate
- credentials against. This is part of the request URL.
- - `engine_name` (string) - The name of the aws secrets engine. In the
- Vault docs, this is normally referred to as "aws", and Packer will
- default to "aws" if `engine_name` is not set.
- - `role_arn` (string)- The ARN of the role to assume if credential\_type
- on the Vault role is assumed\_role. Must match one of the allowed role
- ARNs in the Vault role. Optional if the Vault role only allows a single
- AWS role ARN; required otherwise.
- - `ttl` (string) - Specifies the TTL for the use of the STS token. This
- is specified as a string with a duration suffix. Valid only when
- credential\_type is assumed\_role or federation\_token. When not
- specified, the default\_sts\_ttl set for the role will be used. If that
- is also not set, then the default value of 3600s will be used. AWS
- places limits on the maximum TTL allowed. See the AWS documentation on
- the DurationSeconds parameter for AssumeRole (for assumed\_role
- credential types) and GetFederationToken (for federation\_token
- credential types) for more details.
-
- Example:
- `json { "vault_aws_engine": { "name": "myrole", "role_arn": "myarn", "ttl": "3600s" } }`
-
-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
- in order to create a temporary security group within the VPC. Requires
- `subnet_id` to be set. If this field is left blank, Packer will try to get
- the VPC ID from the `subnet_id`.
-
-- `vpc_filter` (object) - Filters used to populate the `vpc_id` field.
- `vpc_id` takes precedence over this.
- Example:
-
- ``` json
- {
- "vpc_filter": {
- "filters": {
- "tag:Class": "build",
- "isDefault": "false",
- "cidr": "/24"
- }
- }
- }
- ```
-
- This selects the VPC with tag `Class` with the value `build`, which is not
- the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
- unless *exactly* one VPC is returned.
-
- - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
- This will fail unless *exactly* one VPC is returned. Any filter
- described in the docs for
- [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
- is valid.
-
-- `x509_upload_path` (string) - The path on the remote machine where the X509
- certificate will be uploaded. This path must already exist and be writable.
- X509 certificates are uploaded after provisioning is run, so it is
- perfectly okay to create this directory as part of the provisioning
- process. Defaults to `/tmp`.
-
-- `windows_password_timeout` (string) - The timeout for waiting for a Windows
- password for Windows instances. Defaults to 20 minutes. Example value:
- `10m`
+<%= partial "partials/builder/amazon/common/AMIBlockDevices-not-required" %>
+<%= partial "partials/builder/amazon/common/AccessConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/AMIConfig-not-required" %>
+<%= partial "partials/builder/amazon/common/BlockDevice-not-required" %>
+<%= partial "partials/builder/amazon/common/RunConfig-not-required" %>
+<%= partial "partials/builder/amazon/instance/Config-not-required" %>
+<%= partial "partials/helper/communicator/Config-not-required" %>
## Basic Example
From 13fedfaa09f5ff8d6e143afd915a64c7048d06bd Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:46:24 +0200
Subject: [PATCH 32/97] azure: use auto-generated partials
---
.../{azure.html.md => azure.html.md.erb} | 200 +-----------------
1 file changed, 3 insertions(+), 197 deletions(-)
rename website/source/docs/builders/{azure.html.md => azure.html.md.erb} (62%)
diff --git a/website/source/docs/builders/azure.html.md b/website/source/docs/builders/azure.html.md.erb
similarity index 62%
rename from website/source/docs/builders/azure.html.md
rename to website/source/docs/builders/azure.html.md.erb
index e1532d0c4..782f8a974 100644
--- a/website/source/docs/builders/azure.html.md
+++ b/website/source/docs/builders/azure.html.md.erb
@@ -66,25 +66,7 @@ you should specify `subscription_id`, `client_id` and one of `client_secret`,
### Required:
-- `image_publisher` (string) - PublisherName for your base image. See
- [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
- for details.
-
- CLI example `az vm image list-publishers --location westus`
-
-- `image_offer` (string) - Offer for your base image. See
- [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
- for details.
-
- CLI example
- `az vm image list-offers --location westus --publisher Canonical`
-
-- `image_sku` (string) - SKU for your base image. See
- [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
- for details.
-
- CLI example
- `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer`
+<%= partial "partials/builder/azure/arm/Config-required" %>
#### VHD or Managed Image
@@ -178,184 +160,8 @@ Providing `temp_resource_group_name` or `location` in combination with
### Optional:
-- `azure_tags` (object of name/value strings) - the user can define up to 15
- tags. Tag names cannot exceed 512 characters, and tag values cannot exceed
- 256 characters. Tags are applied to every resource deployed by a Packer
- build, i.e. Resource Group, VM, NIC, VNET, Public IP, KeyVault, etc.
-
-- `cloud_environment_name` (string) - One of `Public`, `China`, `Germany`, or
- `USGovernment`. Defaults to `Public`. Long forms such as
- `USGovernmentCloud` and `AzureUSGovernmentCloud` are also supported.
-
-- `custom_data_file` (string) - Specify a file containing custom data to inject
- into the cloud-init process. The contents of the file are read and injected
- into the ARM template. The custom data will be passed to cloud-init for
- processing at the time of provisioning. See
- [documentation](http://cloudinit.readthedocs.io/en/latest/topics/examples.html)
- to learn more about custom data, and how it can be used to influence the
- provisioning process.
-
-- `custom_managed_image_name` (string) - Specify the source managed image's
- name to use. If this value is set, do not set image\_publisher,
- image\_offer, image\_sku, or image\_version. If this value is set, the
- value `custom_managed_image_resource_group_name` must also be set. See
- [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
- to learn more about managed images.
-
-- `custom_managed_image_resource_group_name` (string) - Specify the source
- managed image's resource group to use. If this value is set, do not
- set image\_publisher, image\_offer, image\_sku, or image\_version. If this
- value is set, the value `custom_managed_image_name` must also be set. See
- [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
- to learn more about managed images.
-
-- `image_version` (string) - Specify a specific version of an OS to boot from.
- Defaults to `latest`. There may be a difference in versions available
- across regions due to image synchronization latency. To ensure a consistent
- version across regions set this value to one that is available in all
- regions where you are deploying.
-
- CLI example
- `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
-
-- `image_url` (string) - Specify a custom VHD to use. If this value is set, do
- not set image\_publisher, image\_offer, image\_sku, or image\_version.
-
-- `managed_image_storage_account_type` (string) - Specify the storage account
- type for a managed image. Valid values are Standard\_LRS and Premium\_LRS.
- The default is Standard\_LRS.
-
-- `os_disk_size_gb` (number) - Specify the size of the OS disk in GB
- (gigabytes). Values of zero or less than zero are ignored.
-
-- `disk_caching_type` (string) - Specify the disk caching type. Valid values
- are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
-
-- `disk_additional_size` (array of integers) - The size(s) of any additional
- hard disks for the VM in gigabytes. If this is not specified then the VM
- will only contain an OS disk. The number of additional disks and maximum
- size of a disk depends on the configuration of your VM. See
- [Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds)
- or
- [Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds)
- for more information.
-
- For VHD builds the final artifacts will be named
- `PREFIX-dataDisk-.UUID.vhd` and stored in the specified capture
- container along side the OS disk. The additional disks are included in the
- deployment template `PREFIX-vmTemplate.UUID`.
-
- For Managed build the final artifacts are included in the managed image.
- The additional disk will have the same storage account type as the OS disk,
- as specified with the `managed_image_storage_account_type` setting.
-
-- `os_type` (string) - If either `Linux` or `Windows` is specified Packer will
- automatically configure authentication credentials for the provisioned
- machine. For `Linux` this configures an SSH authorized key. For `Windows`
- this configures a WinRM certificate.
-
-- `plan_info` (object) - Used for creating images from Marketplace images.
- Please refer to [Deploy an image with Marketplace
- terms](https://aka.ms/azuremarketplaceapideployment) for more details. Not
- all Marketplace images support programmatic deployment, and support is
- controlled by the image publisher.
-
- An example plan\_info object is defined below.
-
- ``` json
- {
- "plan_info": {
- "plan_name": "rabbitmq",
- "plan_product": "rabbitmq",
- "plan_publisher": "bitnami"
- }
- }
- ```
-
- `plan_name` (string) - The plan name, required. `plan_product` (string) -
- The plan product, required. `plan_publisher` (string) - The plan publisher,
- required. `plan_promotion_code` (string) - Some images accept a promotion
- code, optional.
-
- Images created from the Marketplace with `plan_info` **must** specify
- `plan_info` whenever the image is deployed. The builder automatically adds
- tags to the image to ensure this information is not lost. The following
- tags are added.
-
- 1. PlanName
- 2. PlanProduct
- 3. PlanPublisher
- 4. PlanPromotionCode
-
-- `shared_image_gallery` (object) - Use a [Shared Gallery
- image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
- as the source for this build. *VHD targets are incompatible with this build
- type* - the target must be a *Managed Image*.
-
- "shared_image_gallery": {
- "subscription": "00000000-0000-0000-0000-00000000000",
- "resource_group": "ResourceGroup",
- "gallery_name": "GalleryName",
- "image_name": "ImageName",
- "image_version": "1.0.0"
- }
- "managed_image_name": "TargetImageName",
- "managed_image_resource_group_name": "TargetResourceGroup"
-
-- `temp_compute_name` (string) - temporary name assigned to the VM. If this
- value is not set, a random value will be assigned. Knowing the resource
- group and VM name allows one to execute commands to update the VM during a
- Packer build, e.g. attach a resource disk to the VM.
-
-- `tenant_id` (string) - The account identifier with which your `client_id` and
- `subscription_id` are associated. If not specified, `tenant_id` will be
- looked up using `subscription_id`.
-
-- `private_virtual_network_with_public_ip` (boolean) - This value allows you to
- set a `virtual_network_name` and obtain a public IP. If this value is not
- set and `virtual_network_name` is defined Packer is only allowed to be
- executed from a host on the same subnet / virtual network.
-
-- `virtual_network_name` (string) - Use a pre-existing virtual network for the
- VM. This option enables private communication with the VM, no public IP
- address is **used** or **provisioned** (unless you set
- `private_virtual_network_with_public_ip`).
-
-- `virtual_network_resource_group_name` (string) - If virtual\_network\_name is
- set, this value **may** also be set. If virtual\_network\_name is set, and
- this value is not set the builder attempts to determine the resource group
- containing the virtual network. If the resource group cannot be found, or
- it cannot be disambiguated, this value should be set.
-
-- `virtual_network_subnet_name` (string) - If virtual\_network\_name is set,
- this value **may** also be set. If virtual\_network\_name is set, and this
- value is not set the builder attempts to determine the subnet to use with
- the virtual network. If the subnet cannot be found, or it cannot be
- disambiguated, this value should be set.
-
-- `vm_size` (string) - Size of the VM used for building. This can be changed
- when you deploy a VM from your VHD. See
- [pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
- information. Defaults to `Standard_A1`.
-
- CLI example `az vm list-sizes --location westus`
-
-- `async_resourcegroup_delete` (boolean) - If you want packer to delete the
- temporary resource group asynchronously set this value. It's a boolean
- value and defaults to false. **Important** Setting this true means that
- your builds are faster, however any failed deletes are not reported.
-
-- `managed_image_os_disk_snapshot_name` (string) - If
- managed\_image\_os\_disk\_snapshot\_name is set, a snapshot of the OS disk
- is created with the same name as this value before the VM is captured.
-
-- `managed_image_data_disk_snapshot_prefix` (string) - If
- managed\_image\_data\_disk\_snapshot\_prefix is set, snapshot of the data
- disk(s) is created with the same prefix as this value before the VM is
- captured.
-
-- `managed_image_zone_resilient` (bool) - Store the image in zone-resilient storage. You need to create it
- in a region that supports [availability zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
+<%= partial "partials/builder/azure/arm/Config-not-required" %>
+<%= partial "partials/builder/azure/arm/ClientConfig-not-required" %>
## Basic Example
From 8b277f9626ca1b56e117e6315f6a1d492cfcb201 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:46:33 +0200
Subject: [PATCH 33/97] docker: use auto-generated partials
---
.../{docker.html.md => docker.html.md.erb} | 100 +-----------------
1 file changed, 3 insertions(+), 97 deletions(-)
rename website/source/docs/builders/{docker.html.md => docker.html.md.erb} (66%)
diff --git a/website/source/docs/builders/docker.html.md b/website/source/docs/builders/docker.html.md.erb
similarity index 66%
rename from website/source/docs/builders/docker.html.md
rename to website/source/docs/builders/docker.html.md.erb
index e59e6f2c1..3834e0601 100644
--- a/website/source/docs/builders/docker.html.md
+++ b/website/source/docs/builders/docker.html.md.erb
@@ -138,107 +138,13 @@ standard [communicators](/docs/templates/communicator.html).
You must specify (only) one of `commit`, `discard`, or `export_path`.
-- `commit` (boolean) - If true, the container will be committed to an image
- rather than exported.
-- `discard` (boolean) - Throw away the container when the build is complete.
- This is useful for the [artifice
- post-processor](https://www.packer.io/docs/post-processors/artifice.html).
-
-- `export_path` (string) - The path where the final container will be
- exported as a tar file.
-
-- `image` (string) - The base image for the Docker container that will be
- started. This image will be pulled from the Docker registry if it doesn't
- already exist.
+<%= partial "partials/builder/docker/Config-required" %>
### Optional:
-- `author` (string) - Set the author (e-mail) of a commit.
-
-- `aws_access_key` (string) - The AWS access key used to communicate with
- AWS. [Learn how to set
- this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `aws_secret_key` (string) - The AWS secret key used to communicate with
- AWS. [Learn how to set
- this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `aws_token` (string) - The AWS access token to use. This is different from
- the access key and secret key. If you're not sure what this is, then you
- probably don't need it. This will also be read from the `AWS_SESSION_TOKEN`
- environmental variable.
-
-- `aws_profile` (string) - The AWS shared credentials profile used to
- communicate with AWS. [Learn how to set
- this.](/docs/builders/amazon.html#specifying-amazon-credentials)
-
-- `changes` (array of strings) - Dockerfile instructions to add to the
- commit. Example of instructions are `CMD`, `ENTRYPOINT`, `ENV`, and
- `EXPOSE`. Example: `[ "USER ubuntu", "WORKDIR /app", "EXPOSE 8080" ]`
-
-- `ecr_login` (boolean) - Defaults to false. If true, the builder will login
- in order to pull the image from [Amazon EC2 Container Registry
- (ECR)](https://aws.amazon.com/ecr/). The builder only logs in for the
- duration of the pull. If true `login_server` is required and `login`,
- `login_username`, and `login_password` will be ignored. For more
- information see the [section on ECR](#amazon-ec2-container-registry).
-
-- `exec_user` (string) - Username (UID) to run remote commands with. You can
- also set the group name/ID if you want: (UID or UID:GID).
- You may need this if you get permission errors trying to run the `shell` or
- other provisioners.
-
-- `login` (boolean) - Defaults to false. If true, the builder will login in
- order to pull the image. The builder only logs in for the duration of the
- pull. It always logs out afterwards. For log into ECR see `ecr_login`.
-
-- `login_username` (string) - The username to use to authenticate to login.
-
-- `login_password` (string) - The password to use to authenticate to login.
-
-- `login_server` (string) - The server address to login to.
-
-- `message` (string) - Set a message for the commit.
-
-- `privileged` (boolean) - If true, run the docker container with the
- `--privileged` flag. This defaults to false if not set.
-
-- `pull` (boolean) - If true, the configured image will be pulled using
- `docker pull` prior to use. Otherwise, it is assumed the image already
- exists and can be used. This defaults to true if not set.
-
-- `run_command` (array of strings) - An array of arguments to pass to
- `docker run` in order to run the container. By default this is set to
- `["-d", "-i", "-t", "--entrypoint=/bin/sh", "--", "{{.Image}}"]` if you are
- using a linux container, and
- `["-d", "-i", "-t", "--entrypoint=powershell", "--", "{{.Image}}"]` if you
- are running a windows container. {{.Image}} is a template variable that
- corresponds to the `image` template option. Passing the entrypoint option
- this way will make it the default entrypoint of the resulting image, so
- running `docker run -it --rm $IMAGE` will start the docker image from the
- `/bin/sh` shell interpreter; you could run a script or another shell by
- running `docker run -it --rm $IMAGE -c /bin/bash`. If your docker image
- embeds a binary intended to be run often, you should consider changing the
- default entrypoint to point to it.
-
-- `volumes` (map of strings to strings) - A mapping of additional volumes to
- mount into this container. The key of the object is the host path, the
- value is the container path.
-
-- `windows_container` (bool) - If "true", tells Packer that you are building a
- Windows container running on a windows host. This is necessary for building
- Windows containers, because our normal docker bindings do not work for them.
-
-- `container_dir` (string) - The directory inside container to mount temp
- directory from host server for work [file
- provisioner](/docs/provisioners/file.html). This defaults to
- `c:/packer-files` on windows and `/packer-files` on other systems.
-
-- `fix_upload_owner` (boolean) - If true, files uploaded to the container
- will be owned by the user the container is running as. If false, the owner
- will depend on the version of docker installed in the system. Defaults to
- true.
+<%= partial "partials/builder/docker/AwsAccessConfig-not-required" %>
+<%= partial "partials/builder/docker/Config-not-required" %>
## Using the Artifact: Export
From 509df6ea0fd22eaa8f9ed75b4d5f93d14549ebe3 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:46:45 +0200
Subject: [PATCH 34/97] communitator: use auto-generated partials
---
.../docs/templates/communicator.html.md | 222 ------------------
.../docs/templates/communicator.html.md.erb | 85 +++++++
2 files changed, 85 insertions(+), 222 deletions(-)
delete mode 100644 website/source/docs/templates/communicator.html.md
create mode 100644 website/source/docs/templates/communicator.html.md.erb
diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md
deleted file mode 100644
index 4114233fa..000000000
--- a/website/source/docs/templates/communicator.html.md
+++ /dev/null
@@ -1,222 +0,0 @@
----
-description: |
- Communicators are the mechanism Packer uses to upload files, execute scripts,
- etc. with the machine being created.
-layout: docs
-page_title: 'Communicators - Templates'
-sidebar_current: 'docs-templates-communicators'
----
-
-# Template Communicators
-
-Communicators are the mechanism Packer uses to upload files, execute scripts,
-etc. with the machine being created.
-
-Communicators are configured within the
-[builder](/docs/templates/builders.html) section. Packer currently supports
-three kinds of communicators:
-
-- `none` - No communicator will be used. If this is set, most provisioners
- also can't be used.
-
-- `ssh` - An SSH connection will be established to the machine. This is
- usually the default.
-
-- `winrm` - A WinRM connection will be established.
-
-In addition to the above, some builders have custom communicators they can use.
-For example, the Docker builder has a "docker" communicator that uses
-`docker exec` and `docker cp` to execute scripts and copy files.
-
-## Using a Communicator
-
-By default, the SSH communicator is usually used. Additional configuration may
-not even be necessary, since some builders such as Amazon automatically
-configure everything.
-
-However, to specify a communicator, you set the `communicator` key within a
-build. Multiple builds can have different communicators. Example:
-
-``` json
-{
- "builders": [
- {
- "type": "amazon-ebs",
- "communicator": "ssh"
- }
- ]
-}
-```
-
-After specifying the `communicator`, you can specify a number of other
-configuration parameters for that communicator. These are documented below.
-
-## SSH Communicator
-
-The SSH communicator connects to the host via SSH. If you have an SSH agent
-configured on the host running Packer, and SSH agent authentication is enabled
-in the communicator config, Packer will automatically forward the SSH agent to
-the remote host.
-
-The SSH communicator has the following options:
-
-- `ssh_agent_auth` (boolean) - If `true`, the local SSH agent will be used to
- authenticate connections to the remote host. Defaults to `false`.
-
-- `ssh_bastion_agent_auth` (boolean) - If `true`, the local SSH agent will be
- used to authenticate with the bastion host. Defaults to `false`.
-
-- `ssh_bastion_host` (string) - A bastion host to use for the actual SSH
- connection.
-
-- `ssh_bastion_password` (string) - The password to use to authenticate with
- the bastion host.
-
-- `ssh_bastion_port` (number) - The port of the bastion host. Defaults to
- `22`.
-
-- `ssh_bastion_private_key_file` (string) - Path to a PEM encoded private key
- file to use to authenticate with the bastion host. The `~` can be used in
- path and will be expanded to the home directory of current user.
-
-- `ssh_bastion_username` (string) - The username to connect to the bastion
- host.
-
-- `ssh_clear_authorized_keys` (boolean) - If true, Packer will attempt to
- remove its temporary key from `~/.ssh/authorized_keys` and
- `/root/.ssh/authorized_keys`. This is a mostly cosmetic option, since
- Packer will delete the temporary private key from the host system
- regardless of whether this is set to true (unless the user has set the
- `-debug` flag). Defaults to "false"; currently only works on guests with
- `sed` installed.
-
-- `ssh_disable_agent_forwarding` (boolean) - If true, SSH agent forwarding
- will be disabled. Defaults to `false`.
-
-- `ssh_file_transfer_method` (`scp` or `sftp`) - How to transfer files,
- Secure copy (default) or SSH File Transfer Protocol.
-
-- `ssh_handshake_attempts` (number) - The number of handshakes to attempt
- with SSH once it can connect. This defaults to `10`.
-
-- `ssh_host` (string) - The address to SSH to. This usually is automatically
- configured by the builder.
-
-- `ssh_keep_alive_interval` (string) - How often to send "keep alive"
- messages to the server. Set to a negative value (`-1s`) to disable. Example
- value: `10s`. Defaults to `5s`.
-
-- `ssh_password` (string) - A plaintext password to use to authenticate with
- SSH.
-
-- `ssh_port` (number) - The port to connect to SSH. This defaults to `22`.
-
-- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to
- use to authenticate with SSH. The `~` can be used in path and will be
- expanded to the home directory of current user.
-
-- `ssh_proxy_host` (string) - A SOCKS proxy host to use for SSH connection
-
-- `ssh_proxy_password` (string) - The password to use to authenticate with
- the proxy server. Optional.
-
-- `ssh_proxy_port` (number) - A port of the SOCKS proxy. Defaults to `1080`.
-
-- `ssh_proxy_username` (string) - The username to authenticate with the proxy
- server. Optional.
-
-- `ssh_pty` (boolean) - If `true`, a PTY will be requested for the SSH
- connection. This defaults to `false`.
-
-- `ssh_read_write_timeout` (string) - The amount of time to wait for a remote
- command to end. This might be useful if, for example, packer hangs on a
- connection after a reboot. Example: `5m`. Disabled by default.
-
-- `ssh_timeout` (string) - The time to wait for SSH to become available.
- Packer uses this to determine when the machine has booted so this is
- usually quite long. Example value: `10m`.
-
-- `ssh_username` (string) - The username to connect to SSH with. Required if
- using SSH.
-
-### SSH Communicator Details
-
-Packer will only use one authentication method, either `publickey` or if
-`ssh_password` is used packer will offer `password` and `keyboard-interactive`
-both sending the password. In other words Packer will not work with *sshd*
-configured with more than one configured authentication method using
-`AuthenticationMethods`.
-
-Packer supports the following ciphers:
-
-- aes128-ctr
-- aes192-ctr
-- aes256-ctr
-- arcfour128
-- arcfour256
-- arcfour
-- `es128-gcm@openssh.com`
-- `acha20-poly1305@openssh.com`
-
-And the following MACs:
-
-- hmac-sha1
-- hmac-sha1-96
-- hmac-sha2-256
-- `hmac-sha2-256-etm@openssh.com`
-
-## WinRM Communicator
-
-The WinRM communicator has the following options.
-
-- `winrm_host` (string) - The address for WinRM to connect to.
-
- NOTE: If using an Amazon EBS builder, you can specify the interface WinRM
- connects to via
- [`ssh_interface`](https://www.packer.io/docs/builders/amazon-ebs.html#ssh_interface)
-
-- `winrm_insecure` (boolean) - If `true`, do not check server certificate
- chain and host name.
-
-- `winrm_password` (string) - The password to use to connect to WinRM.
-
-- `winrm_port` (number) - The WinRM port to connect to. This defaults to
- `5985` for plain unencrypted connection and `5986` for SSL when
- `winrm_use_ssl` is set to true.
-
-- `winrm_timeout` (string) - The amount of time to wait for WinRM to become
- available. This defaults to `30m` since setting up a Windows machine
- generally takes a long time.
-
-- `winrm_use_ntlm` (boolean) - If `true`, NTLMv2 authentication (with session
- security) will be used for WinRM, rather than default (basic
- authentication), removing the requirement for basic authentication to be
- enabled within the target guest. Further reading for remote connection
- authentication can be found
- [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx).
-
-- `winrm_use_ssl` (boolean) - If `true`, use HTTPS for WinRM.
-
-- `winrm_username` (string) - The username to use to connect to WinRM.
-
-## Pausing Before Connecting
-We recommend that you enable SSH or WinRM as the very last step in your
-guest's bootstrap script, but sometimes you may have a race condition where
-you need Packer to wait before attempting to connect to your guest.
-
-If you end up in this situation, you can use the template option
-`pause_before_connecting`. By default, there is no pause. For example:
-
-```
-{
- "communicator": "ssh",
- "ssh_username": "myuser",
- "pause_before_connecting": "10m"
-}
-```
-
-In this example, Packer will check whether it can connect, as normal. But once
-a connection attempt is successful, it will disconnect and then wait 10 minutes
-before connecting to the guest and beginning provisioning.
-
-
diff --git a/website/source/docs/templates/communicator.html.md.erb b/website/source/docs/templates/communicator.html.md.erb
new file mode 100644
index 000000000..6d801e467
--- /dev/null
+++ b/website/source/docs/templates/communicator.html.md.erb
@@ -0,0 +1,85 @@
+---
+description: |
+ Communicators are the mechanism Packer uses to upload files, execute scripts,
+ etc. with the machine being created.
+layout: docs
+page_title: 'Communicators - Templates'
+sidebar_current: 'docs-templates-communicators'
+---
+
+# Template Communicators
+
+Communicators are the mechanism Packer uses to upload files, execute scripts,
+etc. with the machine being created.
+
+Communicators are configured within the
+[builder](/docs/templates/builders.html) section.
+
+All communicators have the following options:
+
+<%= partial "partials/helper/communicator/Config-not-required" %>
+
+## Using a Communicator
+
+By default, the SSH communicator is usually used. Additional configuration may
+not even be necessary, since some builders such as Amazon automatically
+configure everything.
+
+However, to specify a communicator, you set the `communicator` key within a
+build. Multiple builds can have different communicators. Example:
+
+``` json
+{
+ "builders": [
+ {
+ "type": "amazon-ebs",
+ "communicator": "ssh"
+ }
+ ]
+}
+```
+
+After specifying the `communicator`, you can specify a number of other
+configuration parameters for that communicator. These are documented below.
+
+
+## SSH Communicator
+
+The SSH communicator connects to the host via SSH. If you have an SSH agent
+configured on the host running Packer, and SSH agent authentication is enabled
+in the communicator config, Packer will automatically forward the SSH agent to
+the remote host.
+
+The SSH communicator has the following options:
+
+<%= partial "partials/helper/communicator/SSH-not-required" %>
+
+### SSH Communicator Details
+
+Packer will only use one authentication method, either `publickey` or if
+`ssh_password` is used packer will offer `password` and `keyboard-interactive`
+both sending the password. In other words Packer will not work with *sshd*
+configured with more than one configured authentication method using
+`AuthenticationMethods`.
+
+Packer supports the following ciphers:
+
+- aes128-ctr
+- aes192-ctr
+- aes256-ctr
+- arcfour128
+- arcfour256
+- arcfour
+- `aes128-gcm@openssh.com`
+- `chacha20-poly1305@openssh.com`
+
+And the following MACs:
+
+- hmac-sha1
+- hmac-sha1-96
+- hmac-sha2-256
+- `hmac-sha2-256-etm@openssh.com`
+
+## WinRM Communicator
+
+<%= partial "partials/helper/communicator/WinRM-not-required" %>
From 4ab54714909694630e8a77b6346724aaf1843fb2 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 16:47:03 +0200
Subject: [PATCH 35/97] Makefile: install-gen-deps now installs
./cmd/struct-markdown
---
Makefile | 1 +
1 file changed, 1 insertion(+)
diff --git a/Makefile b/Makefile
index f9518936d..f1c18e11f 100644
--- a/Makefile
+++ b/Makefile
@@ -48,6 +48,7 @@ install-gen-deps: ## Install dependencies for code generation
@go get golang.org/x/tools/cmd/goimports
@go get -u github.com/mna/pigeon
@go get github.com/alvaroloes/enumer
+ @go install ./cmd/struct-markdown
dev: ## Build and install a development build
@grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
From f1fd23c65af0a83e3df29fdef32112c775ff3c28 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 6 Jun 2019 18:19:41 +0200
Subject: [PATCH 36/97] Update appveyor.yml
remove unused get of
github.com/mitchellh/gox & golang.org/x/tools/cmd/stringer
---
appveyor.yml | 2 --
1 file changed, 2 deletions(-)
diff --git a/appveyor.yml b/appveyor.yml
index c63779bfd..417d524aa 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -20,8 +20,6 @@ install:
- echo %Path%
- go version
- go env
- - go get github.com/mitchellh/gox
- - go get golang.org/x/tools/cmd/stringer
build_script:
- git rev-parse HEAD
From 82525e4d2000d2d04673ce85617041d5b67c14f4 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 14:49:28 +0200
Subject: [PATCH 37/97] use partials in yandex docs after master merge
---
website/source/docs/builders/yandex.html.md | 144 ------------------
.../source/docs/builders/yandex.html.md.erb | 67 ++++++++
.../yandex/_Config-not-required.html.md | 7 +-
3 files changed, 72 insertions(+), 146 deletions(-)
delete mode 100644 website/source/docs/builders/yandex.html.md
create mode 100644 website/source/docs/builders/yandex.html.md.erb
diff --git a/website/source/docs/builders/yandex.html.md b/website/source/docs/builders/yandex.html.md
deleted file mode 100644
index e28cde4e7..000000000
--- a/website/source/docs/builders/yandex.html.md
+++ /dev/null
@@ -1,144 +0,0 @@
----
-description: |
- The yandex Packer builder is able to create images for use with
- Yandex.Cloud based on existing images.
-layout: docs
-page_title: 'Yandex Compute - Builders'
-sidebar_current: 'docs-builders-yandex'
----
-
-# Yandex Compute Builder
-
-Type: `yandex`
-
-The `yandex` Packer builder is able to create
-[images](https://cloud.yandex.com/docs/compute/concepts/images) for use with
-[Yandex Compute Cloud](https://cloud.yandex.com/docs/compute/)
-based on existing images.
-
-## Authentication
-
-Yandex.Cloud services authentication requires one of the following security credentials:
-
-- OAuth token
-- File with Service Account Key
-
-
-### Authentication Using Token
-
-To authenticate with an OAuth token only `token` config key is needed.
-Or use the `YC_TOKEN` environment variable.
-
-
-### Authentication Using Service Account Key File
-
-To authenticate with a service account credential, only `service_account_key_file` is needed.
-Or use the `YC_SERVICE_ACCOUNT_KEY_FILE` environment variable.
-
-
-## Basic Example
-
-``` json
-{
- "type": "yandex",
- "token": "YOUR OAUTH TOKEN",
- "folder_id": "YOUR FOLDER ID",
- "source_image_family": "ubuntu-1804-lts",
- "ssh_username": "ubuntu",
- "use_ipv4_nat": "true"
-}
-```
-
-## Configuration Reference
-
-Configuration options are organized below into two categories: required and
-optional. Within each category, the available options are alphabetized and
-described.
-
-In addition to the options listed here, a [communicator](/docs/templates/communicator.html)
-can be configured for this builder.
-
-### Required:
-
-- `folder_id` (string) - The folder ID that will be used to launch instances and store images.
- Alternatively you may set value by environment variable `YC_FOLDER_ID`.
-
-- `token` (string) - OAuth token to use to authenticate to Yandex.Cloud. Alternatively you may set
- value by environment variable `YC_TOKEN`.
-
-- `source_image_family` (string) - The source image family to create the new image
- from. You can also specify `source_image_id` instead. Just one of a `source_image_id` or
- `source_image_family` must be specified. Example: `ubuntu-1804-lts`
-
-### Optional:
-
-- `disk_name` (string) - The name of the disk, if unset the instance name
- will be used.
-
-- `disk_size_gb` (number) - The size of the disk in GB. This defaults to `10`, which is 10GB.
-
-- `disk_type` (string) - Specify disk type for the launched instance. Defaults to `network-hdd`.
-
-- `endpoint` (string) - Non standard api endpoint URL.
-
-- `image_description` (string) - The description of the resulting image.
-
-- `image_family` (string) - The family name of the resulting image.
-
-- `image_labels` (object of key/value strings) - Key/value pair labels to
- apply to the created image.
-
-- `image_name` (string) - The unique name of the resulting image. Defaults to
- `packer-{{timestamp}}`.
-
-- `image_product_ids` (list) - License IDs that indicate which licenses are attached to resulting image.
-
-- `instance_cores` (number) - The number of cores available to the instance.
-
-- `instance_mem_gb` (number) - The amount of memory available to the instance, specified in gigabytes.
-
-- `instance_name` (string) - The name assigned to the instance.
-
-- `labels` (object of key/value strings) - Key/value pair labels to apply to
- the launched instance.
-
-- `metadata` (object of key/value strings) - Metadata applied to the launched
- instance.
-
-- `platform_id` (string) - Identifier of the hardware platform configuration for the instance. This defaults to `standard-v1`.
-
-- `preemptible` (boolean) - Launch a preemptible instance. This defaults to `false`.
-
-- `serial_log_file` (string) - File path to save serial port output of the launched instance.
-
-- `service_account_key_file` (string) - Path to file with Service Account key in json format. This
- is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
- `YC_SERVICE_ACCOUNT_KEY_FILE`.
-
-- `source_image_folder_id` (string) - The ID of the folder containing the source image.
-
-- `source_image_id` (string) - The source image ID to use to create the new image
- from.
-
-- `source_image_family` (string) - The source image family to create
- the new image from. The image family always returns its latest image that
- is not deprecated. Example: `ubuntu-1804-lts`.
-
-- `state_timeout` (string) - The time to wait for instance state changes.
- Defaults to `5m`.
-
-- `subnet_id` (string) - The Yandex VPC subnet id to use for
- the launched instance. Note, the zone of the subnet must match the
- `zone` in which the VM is launched.
-
-- `use_internal_ip` (boolean) - If true, use the instance's internal IP address
- instead of its external IP during building.
-
-- `use_ipv4_nat` (boolean) - If set to `true`, then launched instance will have external internet
- access.
-
-- `use_ipv6` (boolean) - Set to `true` to enable IPv6 for the instance being
- created. This defaults to `false`, or not enabled.
--> **Note:** ~> Usage of IPv6 will be available in the future.
-
-- `zone` (string) - The name of the zone to launch the instance. This defaults to `ru-central1-a`.
diff --git a/website/source/docs/builders/yandex.html.md.erb b/website/source/docs/builders/yandex.html.md.erb
new file mode 100644
index 000000000..cdd7b4718
--- /dev/null
+++ b/website/source/docs/builders/yandex.html.md.erb
@@ -0,0 +1,67 @@
+---
+description: |
+ The yandex Packer builder is able to create images for use with
+ Yandex.Cloud based on existing images.
+layout: docs
+page_title: 'Yandex Compute - Builders'
+sidebar_current: 'docs-builders-yandex'
+---
+
+# Yandex Compute Builder
+
+Type: `yandex`
+
+The `yandex` Packer builder is able to create
+[images](https://cloud.yandex.com/docs/compute/concepts/images) for use with
+[Yandex Compute Cloud](https://cloud.yandex.com/docs/compute/)
+based on existing images.
+
+## Authentication
+
+Yandex.Cloud services authentication requires one of the following security credentials:
+
+- OAuth token
+- File with Service Account Key
+
+
+### Authentication Using Token
+
+To authenticate with an OAuth token only `token` config key is needed.
+Or use the `YC_TOKEN` environment variable.
+
+
+### Authentication Using Service Account Key File
+
+To authenticate with a service account credential, only `service_account_key_file` is needed.
+Or use the `YC_SERVICE_ACCOUNT_KEY_FILE` environment variable.
+
+
+## Basic Example
+
+``` json
+{
+ "type": "yandex",
+ "token": "YOUR OAUTH TOKEN",
+ "folder_id": "YOUR FOLDER ID",
+ "source_image_family": "ubuntu-1804-lts",
+ "ssh_username": "ubuntu",
+ "use_ipv4_nat": "true"
+}
+```
+
+## Configuration Reference
+
+Configuration options are organized below into two categories: required and
+optional. Within each category, the available options are alphabetized and
+described.
+
+In addition to the options listed here, a [communicator](/docs/templates/communicator.html)
+can be configured for this builder.
+
+### Required:
+
+<%= partial "partials/builder/yandex/Config-required" %>
+
+### Optional:
+
+<%= partial "partials/builder/yandex/Config-not-required" %>
diff --git a/website/source/partials/builder/yandex/_Config-not-required.html.md b/website/source/partials/builder/yandex/_Config-not-required.html.md
index 2deb12bba..1e8e94fc6 100644
--- a/website/source/partials/builder/yandex/_Config-not-required.html.md
+++ b/website/source/partials/builder/yandex/_Config-not-required.html.md
@@ -36,8 +36,11 @@
- `platform_id` (string) - Identifier of the hardware platform configuration for the instance. This defaults to standard-v1.
-- `metadata` (map[string]string) - Metadata applied to the launched
- instance.
+- `metadata` (map[string]string) - Metadata applied to the launched instance.
+
+- `metadata_from_file` (map[string]string) - Metadata applied to the launched instance. Values are file paths.
+
+- `preemptible` (bool) - Launch a preemptible instance. This defaults to `false`.
- `serial_log_file` (string) - File path to save serial port output of the launched instance.
From a3e3c313a928fc2979efad1c15d9de0e629c715f Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 15:18:39 +0200
Subject: [PATCH 38/97] docs: use partials in googlecompute after master merge
---
builder/googlecompute/config.go | 38 ++++-
...pute.html.md => googlecompute.html.md.erb} | 149 +----------------
.../_Config-not-required.html.md | 154 +++++++++---------
.../googlecompute/_Config-required.html.md | 21 ++-
4 files changed, 123 insertions(+), 239 deletions(-)
rename website/source/docs/builders/{googlecompute.html.md => googlecompute.html.md.erb} (57%)
diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go
index e174b27c1..c9433be19 100644
--- a/builder/googlecompute/config.go
+++ b/builder/googlecompute/config.go
@@ -34,9 +34,9 @@ type Config struct {
// The project ID that will be used to launch instances and store images.
ProjectId string `mapstructure:"project_id" required:"true"`
// Full or partial URL of the guest accelerator type. GPU accelerators can
- // only be used with "on_host_maintenance": "TERMINATE" option set.
+ // only be used with `"on_host_maintenance": "TERMINATE"` option set.
// Example:
- // "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
+ // `"projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"`
AcceleratorType string `mapstructure:"accelerator_type" required:"false"`
// Number of guest accelerator cards to add to the launched instance.
AcceleratorCount int64 `mapstructure:"accelerator_count" required:"false"`
@@ -60,6 +60,16 @@ type Config struct {
// The description of the resulting image.
ImageDescription string `mapstructure:"image_description" required:"false"`
// Image encryption key to apply to the created image. Possible values:
+ // * kmsKeyName - The name of the encryption key that is stored in Google Cloud KMS.
+ // * RawKey: - A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64.
+ //
+ // example:
+ //
+ // ``` json
+ // {
+ // "kmsKeyName": "projects/${project}/locations/${region}/keyRings/computeEngine/cryptoKeys/computeEngine/cryptoKeyVersions/4"
+ // }
+ // ```
ImageEncryptionKey *compute.CustomerEncryptionKey `mapstructure:"image_encryption_key" required:"false"`
// The name of the image family to which the resulting image belongs. You
// can create disks by specifying an image family instead of a specific
@@ -83,7 +93,8 @@ type Config struct {
MetadataFiles map[string]string `mapstructure:"metadata_files"`
// A Minimum CPU Platform for VM Instance. Availability and default CPU
// platforms vary across zones, based on the hardware available in each GCP
- // zone. Details
+ // zone.
+ // [Details](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
MinCpuPlatform string `mapstructure:"min_cpu_platform" required:"false"`
// The Google Compute network id or URL to use for the launched instance.
// Defaults to "default". If the value is not a URL, it will be
@@ -97,10 +108,12 @@ type Config struct {
// If true, the instance will not have an external IP. use_internal_ip must
// be true if this property is true.
OmitExternalIP bool `mapstructure:"omit_external_ip" required:"false"`
- // Sets Host Maintenance Option. Valid choices are MIGRATE and TERMINATE.
- // Please see GCE Instance Scheduling Options, as not all machine_types
- // support MIGRATE (i.e. machines with GPUs). If preemptible is true this
- // can only be TERMINATE. If preemptible is false, it defaults to MIGRATE
+ // Sets Host Maintenance Option. Valid choices are `MIGRATE` and
+ // `TERMINATE`. Please see [GCE Instance Scheduling
+ // Options](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options),
+ // as not all machine\_types support `MIGRATE` (i.e. machines with GPUs).
+ // If preemptible is true this can only be `TERMINATE`. If preemptible is
+ // false, it defaults to `MIGRATE`
OnHostMaintenance string `mapstructure:"on_host_maintenance" required:"false"`
// If true, launch a preemptible instance.
Preemptible bool `mapstructure:"preemptible" required:"false"`
@@ -109,7 +122,16 @@ type Config struct {
// The region in which to launch the instance. Defaults to the region
// hosting the specified zone.
Region string `mapstructure:"region" required:"false"`
- // The service account scopes for launched instance. Defaults to:
+ // The service account scopes for launched
+ // instance. Defaults to:
+ //
+ // ``` json
+ // [
+ // "https://www.googleapis.com/auth/userinfo.email",
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // ```
Scopes []string `mapstructure:"scopes" required:"false"`
// The service account to be used for launched instance. Defaults to the
// project's default service account unless disable_default_service_account
diff --git a/website/source/docs/builders/googlecompute.html.md b/website/source/docs/builders/googlecompute.html.md.erb
similarity index 57%
rename from website/source/docs/builders/googlecompute.html.md
rename to website/source/docs/builders/googlecompute.html.md.erb
index 1c47ae705..31ff90332 100644
--- a/website/source/docs/builders/googlecompute.html.md
+++ b/website/source/docs/builders/googlecompute.html.md.erb
@@ -207,156 +207,11 @@ builder.
### Required:
-- `project_id` (string) - The project ID that will be used to launch
- instances and store images.
-
-- `source_image` (string) - The source image to use to create the new image
- from. You can also specify `source_image_family` instead. If both
- `source_image` and `source_image_family` are specified, `source_image`
- takes precedence. Example: `"debian-8-jessie-v20161027"`
-
-- `source_image_family` (string) - The source image family to use to create
- the new image from. The image family always returns its latest image that
- is not deprecated. Example: `"debian-8"`.
-
-- `zone` (string) - The zone in which to launch the instance used to create
- the image. Example: `"us-central1-a"`
+<%= partial "partials/builder/googlecompute/Config-required" %>
### Optional:
-- `account_file` (string) - The JSON file containing your account
- credentials. Not required if you run Packer on a GCE instance with a
- service account. Instructions for creating the file or using service
- accounts are above.
-
-- `accelerator_count` (number) - Number of guest accelerator cards to add to
- the launched instance.
-
-- `accelerator_type` (string) - Full or partial URL of the guest accelerator
- type. GPU accelerators can only be used with
- `"on_host_maintenance": "TERMINATE"` option set. Example:
- `"projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"`
-
-- `address` (string) - The name of a pre-allocated static external IP
- address. Note, must be the name and not the actual IP address.
-
-- `disable_default_service_account` (bool) - If true, the default service
- account will not be used if `service_account_email` is not specified. Set
- this value to true and omit `service_account_email` to provision a VM with
- no service account.
-
-- `disk_name` (string) - The name of the disk, if unset the instance name
- will be used.
-
-- `disk_size` (number) - The size of the disk in GB. This defaults to `10`,
- which is 10GB.
-
-- `disk_type` (string) - Type of disk used to back your instance, like
- `pd-ssd` or `pd-standard`. Defaults to `pd-standard`.
-
-- `image_description` (string) - The description of the resulting image.
-
-- `image_family` (string) - The name of the image family to which the
- resulting image belongs. You can create disks by specifying an image family
- instead of a specific image name. The image family always returns its
- latest image that is not deprecated.
-
-- `image_labels` (object of key/value strings) - Key/value pair labels to
- apply to the created image.
-
-- `image_licenses` (array of strings) - Licenses to apply to the created
- image.
-
-- `image_name` (string) - The unique name of the resulting image. Defaults to
- `"packer-{{timestamp}}"`.
-
-- `image_encryption_key` (object of encryption key) - Image encryption key to apply to the created image. Possible values:
- * kmsKeyName - The name of the encryption key that is stored in Google Cloud KMS.
- * RawKey: - A 256-bit customer-supplied encryption key, encodes in RFC 4648 base64.
-
- example:
- ``` json
- {
- "kmsKeyName": "projects/${project}/locations/${region}/keyRings/computeEngine/cryptoKeys/computeEngine/cryptoKeyVersions/4"
- }
- ```
-
-- `instance_name` (string) - A name to give the launched instance. Beware
- that this must be unique. Defaults to `"packer-{{uuid}}"`.
-
-- `labels` (object of key/value strings) - Key/value pair labels to apply to
- the launched instance.
-
-- `machine_type` (string) - The machine type. Defaults to `"n1-standard-1"`.
-
-- `metadata` (object of key/value strings) - Metadata applied to the launched
- instance.
-
-- `min_cpu_platform` (string) - A Minimum CPU Platform for VM Instance.
- Availability and default CPU platforms vary across zones, based on the
- hardware available in each GCP zone.
- [Details](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
-
-- `network` (string) - The Google Compute network id or URL to use for the
- launched instance. Defaults to `"default"`. If the value is not a URL, it
- will be interpolated to
- `projects/((network_project_id))/global/networks/((network))`. This value
- is not required if a `subnet` is specified.
-
-- `network_project_id` (string) - The project ID for the network and
- subnetwork to use for launched instance. Defaults to `project_id`.
-
-- `omit_external_ip` (boolean) - If true, the instance will not have an
- external IP. `use_internal_ip` must be true if this property is true.
-
-- `on_host_maintenance` (string) - Sets Host Maintenance Option. Valid
- choices are `MIGRATE` and `TERMINATE`. Please see [GCE Instance Scheduling
- Options](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options),
- as not all machine\_types support `MIGRATE` (i.e. machines with GPUs). If
- preemptible is true this can only be `TERMINATE`. If preemptible is false,
- it defaults to `MIGRATE`
-
-- `preemptible` (boolean) - If true, launch a preemptible instance.
-
-- `region` (string) - The region in which to launch the instance. Defaults to
- the region hosting the specified `zone`.
-
-- `service_account_email` (string) - The service account to be used for
- launched instance. Defaults to the project's default service account unless
- `disable_default_service_account` is true.
-
-- `scopes` (array of strings) - The service account scopes for launched
- instance. Defaults to:
-
- ``` json
- [
- "https://www.googleapis.com/auth/userinfo.email",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- ```
-
-- `source_image_project_id` (string) - The project ID of the project
- containing the source image.
-
-- `startup_script_file` (string) - The path to a startup script to run on the
- VM from which the image will be made.
-
-- `state_timeout` (string) - The time to wait for instance state changes.
- Defaults to `"5m"`.
-
-- `subnetwork` (string) - The Google Compute subnetwork id or URL to use for
- the launched instance. Only required if the `network` has been created with
- custom subnetting. Note, the region of the subnetwork must match the
- `region` or `zone` in which the VM is launched. If the value is not a URL,
- it will be interpolated to
- `projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))`
-
-- `tags` (array of strings) - Assign network tags to apply firewall rules to
- VM instance.
-
-- `use_internal_ip` (boolean) - If true, use the instance's internal IP
- instead of its external IP during building.
+<%= partial "partials/builder/googlecompute/Config-not-required" %>
## Startup Scripts
diff --git a/website/source/partials/builder/googlecompute/_Config-not-required.html.md b/website/source/partials/builder/googlecompute/_Config-not-required.html.md
index b1d5ccfe8..13aecab71 100644
--- a/website/source/partials/builder/googlecompute/_Config-not-required.html.md
+++ b/website/source/partials/builder/googlecompute/_Config-not-required.html.md
@@ -1,34 +1,29 @@
-- `account_file` (string) - The JSON file containing your account
- credentials. Not required if you run Packer on a GCE instance with a
- service account. Instructions for creating the file or using service
- accounts are above.
+- `account_file` (string) - The JSON file containing your account credentials. Not required if you
+ run Packer on a GCE instance with a service account. Instructions for
+ creating the file or using service accounts are above.
-- `accelerator_type` (string) - Full or partial URL of the guest accelerator
- type. GPU accelerators can only be used with
- "on_host_maintenance": "TERMINATE" option set. Example:
- "projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"
+- `accelerator_type` (string) - Full or partial URL of the guest accelerator type. GPU accelerators can
+ only be used with `"on_host_maintenance": "TERMINATE"` option set.
+ Example:
+ `"projects/project_id/zones/europe-west1-b/acceleratorTypes/nvidia-tesla-k80"`
-- `accelerator_count` (int64) - Number of guest accelerator cards to add to
- the launched instance.
+- `accelerator_count` (int64) - Number of guest accelerator cards to add to the launched instance.
-- `address` (string) - The name of a pre-allocated static external IP
- address. Note, must be the name and not the actual IP address.
+- `address` (string) - The name of a pre-allocated static external IP address. Note, must be
+ the name and not the actual IP address.
-- `disable_default_service_account` (bool) - If true, the default service
- account will not be used if service_account_email is not specified. Set
- this value to true and omit service_account_email to provision a VM with
- no service account.
+- `disable_default_service_account` (bool) - If true, the default service account will not be used if
+ service_account_email is not specified. Set this value to true and omit
+ service_account_email to provision a VM with no service account.
-- `disk_name` (string) - The name of the disk, if unset the instance name
- will be used.
+- `disk_name` (string) - The name of the disk, if unset the instance name will be used.
-- `disk_size` (int64) - The size of the disk in GB. This defaults to 10,
- which is 10GB.
+- `disk_size` (int64) - The size of the disk in GB. This defaults to 10, which is 10GB.
-- `disk_type` (string) - Type of disk used to back your instance, like
- pd-ssd or pd-standard. Defaults to pd-standard.
+- `disk_type` (string) - Type of disk used to back your instance, like pd-ssd or pd-standard.
+ Defaults to pd-standard.
- `image_name` (string) - The unique name of the resulting image. Defaults to
"packer-{{timestamp}}".
@@ -36,84 +31,97 @@
- `image_description` (string) - The description of the resulting image.
- `image_encryption_key` (*compute.CustomerEncryptionKey) - Image encryption key to apply to the created image. Possible values:
+ * kmsKeyName - The name of the encryption key that is stored in Google Cloud KMS.
+    * RawKey - A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64.
-- `image_family` (string) - The name of the image family to which the
- resulting image belongs. You can create disks by specifying an image family
- instead of a specific image name. The image family always returns its
- latest image that is not deprecated.
+ example:
-- `image_labels` (map[string]string) - Key/value pair labels to
- apply to the created image.
+ ``` json
+ {
+ "kmsKeyName": "projects/${project}/locations/${region}/keyRings/computeEngine/cryptoKeys/computeEngine/cryptoKeyVersions/4"
+ }
+ ```
-- `image_licenses` ([]string) - Licenses to apply to the created
- image.
+- `image_family` (string) - The name of the image family to which the resulting image belongs. You
+ can create disks by specifying an image family instead of a specific
+ image name. The image family always returns its latest image that is not
+ deprecated.
-- `instance_name` (string) - A name to give the launched instance. Beware
- that this must be unique. Defaults to "packer-{{uuid}}".
+- `image_labels` (map[string]string) - Key/value pair labels to apply to the created image.
-- `labels` (map[string]string) - Key/value pair labels to apply to
- the launched instance.
+- `image_licenses` ([]string) - Licenses to apply to the created image.
+
+- `instance_name` (string) - A name to give the launched instance. Beware that this must be unique.
+ Defaults to "packer-{{uuid}}".
+
+- `labels` (map[string]string) - Key/value pair labels to apply to the launched instance.
- `machine_type` (string) - The machine type. Defaults to "n1-standard-1".
-- `metadata` (map[string]string) - Metadata applied to the launched
- instance.
+- `metadata` (map[string]string) - Metadata applied to the launched instance.
-- `min_cpu_platform` (string) - A Minimum CPU Platform for VM Instance.
- Availability and default CPU platforms vary across zones, based on the
- hardware available in each GCP zone.
- Details
+- `metadata_files` (map[string]string) - Metadata applied to the launched instance. Values are files.
-- `network` (string) - The Google Compute network id or URL to use for the
- launched instance. Defaults to "default". If the value is not a URL, it
- will be interpolated to
+- `min_cpu_platform` (string) - A Minimum CPU Platform for VM Instance. Availability and default CPU
+ platforms vary across zones, based on the hardware available in each GCP
+ zone.
+ [Details](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+
+- `network` (string) - The Google Compute network id or URL to use for the launched instance.
+ Defaults to "default". If the value is not a URL, it will be
+ interpolated to
projects/((network_project_id))/global/networks/((network)). This value
is not required if a subnet is specified.
-- `network_project_id` (string) - The project ID for the network and
- subnetwork to use for launched instance. Defaults to project_id.
+- `network_project_id` (string) - The project ID for the network and subnetwork to use for launched
+ instance. Defaults to project_id.
-- `omit_external_ip` (bool) - If true, the instance will not have an
- external IP. use_internal_ip must be true if this property is true.
+- `omit_external_ip` (bool) - If true, the instance will not have an external IP. use_internal_ip must
+ be true if this property is true.
-- `on_host_maintenance` (string) - Sets Host Maintenance Option. Valid
- choices are MIGRATE and TERMINATE. Please see GCE Instance Scheduling
- Options,
- as not all machine_types support MIGRATE (i.e. machines with GPUs). If
- preemptible is true this can only be TERMINATE. If preemptible is false,
- it defaults to MIGRATE
+- `on_host_maintenance` (string) - Sets Host Maintenance Option. Valid choices are `MIGRATE` and
+ `TERMINATE`. Please see [GCE Instance Scheduling
+ Options](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options),
+ as not all machine\_types support `MIGRATE` (i.e. machines with GPUs).
+ If preemptible is true this can only be `TERMINATE`. If preemptible is
+ false, it defaults to `MIGRATE`
- `preemptible` (bool) - If true, launch a preemptible instance.
-- `state_timeout` (string) - The time to wait for instance state changes.
- Defaults to "5m".
+- `state_timeout` (string) - The time to wait for instance state changes. Defaults to "5m".
-- `region` (string) - The region in which to launch the instance. Defaults to
- the region hosting the specified zone.
+- `region` (string) - The region in which to launch the instance. Defaults to the region
+ hosting the specified zone.
- `scopes` ([]string) - The service account scopes for launched
instance. Defaults to:
-- `service_account_email` (string) - The service account to be used for
- launched instance. Defaults to the project's default service account unless
- disable_default_service_account is true.
+ ``` json
+ [
+ "https://www.googleapis.com/auth/userinfo.email",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ ```
-- `source_image_project_id` (string) - The project ID of the project
- containing the source image.
+- `service_account_email` (string) - The service account to be used for launched instance. Defaults to the
+ project's default service account unless disable_default_service_account
+ is true.
-- `startup_script_file` (string) - The path to a startup script to run on the
- VM from which the image will be made.
+- `source_image_project_id` (string) - The project ID of the project containing the source image.
-- `subnetwork` (string) - The Google Compute subnetwork id or URL to use for
- the launched instance. Only required if the network has been created with
- custom subnetting. Note, the region of the subnetwork must match the
- region or zone in which the VM is launched. If the value is not a URL,
- it will be interpolated to
+- `startup_script_file` (string) - The path to a startup script to run on the VM from which the image will
+ be made.
+
+- `subnetwork` (string) - The Google Compute subnetwork id or URL to use for the launched
+ instance. Only required if the network has been created with custom
+ subnetting. Note, the region of the subnetwork must match the region or
+ zone in which the VM is launched. If the value is not a URL, it will be
+ interpolated to
projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))
-- `tags` ([]string) - Assign network tags to apply firewall rules to
- VM instance.
+- `tags` ([]string) - Assign network tags to apply firewall rules to VM instance.
-- `use_internal_ip` (bool) - If true, use the instance's internal IP
- instead of its external IP during building.
+- `use_internal_ip` (bool) - If true, use the instance's internal IP instead of its external IP
+ during building.
\ No newline at end of file
diff --git a/website/source/partials/builder/googlecompute/_Config-required.html.md b/website/source/partials/builder/googlecompute/_Config-required.html.md
index c2a1213a4..3f364dd3b 100644
--- a/website/source/partials/builder/googlecompute/_Config-required.html.md
+++ b/website/source/partials/builder/googlecompute/_Config-required.html.md
@@ -1,17 +1,16 @@
-- `project_id` (string) - The project ID that will be used to launch
- instances and store images.
+- `project_id` (string) - The project ID that will be used to launch instances and store images.
-- `source_image` (string) - The source image to use to create the new image
- from. You can also specify source_image_family instead. If both
- source_image and source_image_family are specified, source_image
- takes precedence. Example: "debian-8-jessie-v20161027"
+- `source_image` (string) - The source image to use to create the new image from. You can also
+ specify source_image_family instead. If both source_image and
+ source_image_family are specified, source_image takes precedence.
+ Example: "debian-8-jessie-v20161027"
-- `source_image_family` (string) - The source image family to use to create
- the new image from. The image family always returns its latest image that
- is not deprecated. Example: "debian-8".
+- `source_image_family` (string) - The source image family to use to create the new image from. The image
+ family always returns its latest image that is not deprecated. Example:
+ "debian-8".
-- `zone` (string) - The zone in which to launch the instance used to create
- the image. Example: "us-central1-a"
+- `zone` (string) - The zone in which to launch the instance used to create the image.
+ Example: "us-central1-a"
\ No newline at end of file
From 101cb6d10cd77299d32b3ab1523847482befd09b Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 15:21:05 +0200
Subject: [PATCH 39/97] Update modules.txt
---
vendor/modules.txt | 2 ++
1 file changed, 2 insertions(+)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c76da0c1c..ba228cd86 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -429,6 +429,8 @@ github.com/sirupsen/logrus
# github.com/stretchr/testify v1.3.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
+# github.com/temoto/robotstxt v0.0.0-20180810133444-97ee4a9ee6ea
+github.com/temoto/robotstxt
# github.com/tencentcloud/tencentcloud-sdk-go v0.0.0-20181220135002-f1744d40d346
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile
From 20a152d4aa11e8835fd34310fabf37232e90a9f7 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 16:02:02 +0200
Subject: [PATCH 40/97] add comments to common/iso_config.go from docs
---
common/iso_config.go | 98 ++++++++++++++++++++++++++++++++++++++++----
1 file changed, 90 insertions(+), 8 deletions(-)
diff --git a/common/iso_config.go b/common/iso_config.go
index a66042efc..435cfa3c7 100644
--- a/common/iso_config.go
+++ b/common/iso_config.go
@@ -8,15 +8,97 @@ import (
"github.com/hashicorp/packer/template/interpolate"
)
-// ISOConfig contains configuration for downloading ISO images.
+// By default, Packer will symlink, download or copy image files to the Packer
+// cache into a "`hash($iso_url+$iso_checksum).$iso_target_extension`" file.
+// Packer uses [hashicorp/go-getter](https://github.com/hashicorp/go-getter) in
+// file mode in order to perform a download.
+//
+// go-getter supports the following protocols:
+//
+// * Local files
+// * Git
+// * Mercurial
+// * HTTP
+// * Amazon S3
+//
+//
+// \~> On windows - when referencing a local iso - if packer is running
+// without symlinking rights, the iso will be copied to the cache folder. Read
+// [Symlinks in Windows 10
+// !](https://blogs.windows.com/buildingapps/2016/12/02/symlinks-windows-10/)
+// for more info.
+//
+// Examples:
+// go-getter can guess the checksum type based on `iso_checksum` len.
+//
+// ``` json
+// {
+// "iso_checksum": "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// }
+// ```
+//
+// ``` json
+// {
+// "iso_checksum_type": "file",
+// "iso_checksum": "ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// }
+// ```
+//
+// ``` json
+// {
+// "iso_checksum_url": "./shasums.txt",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// }
+// ```
+//
+// ``` json
+// {
+// "iso_checksum_type": "sha256",
+// "iso_checksum_url": "./shasums.txt",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// }
+// ```
+//
type ISOConfig struct {
- ISOChecksum string `mapstructure:"iso_checksum"`
- ISOChecksumURL string `mapstructure:"iso_checksum_url"`
- ISOChecksumType string `mapstructure:"iso_checksum_type"`
- ISOUrls []string `mapstructure:"iso_urls"`
- TargetPath string `mapstructure:"iso_target_path"`
- TargetExtension string `mapstructure:"iso_target_extension"`
- RawSingleISOUrl string `mapstructure:"iso_url"`
+ // The checksum for the ISO file or virtual hard drive file. The algorithm
+ // to use when computing the checksum can be optionally specified with
+ // `iso_checksum_type`. When `iso_checksum_type` is not set packer will
+ // guess the checksumming type based on `iso_checksum` length.
+	// `iso_checksum` can also be a file or a URL, in which case
+ // `iso_checksum_type` must be set to `file`; the go-getter will download
+ // it and use the first hash found.
+ ISOChecksum string `mapstructure:"iso_checksum" required:"true"`
+	// A URL to a checksum file containing a checksum for the ISO file. At
+ // least one of `iso_checksum` and `iso_checksum_url` must be defined.
+ // `iso_checksum_url` will be ignored if `iso_checksum` is non empty.
+ ISOChecksumURL string `mapstructure:"iso_checksum_url"`
+ // The algorithm to be used when computing the checksum of the file
+ // specified in `iso_checksum`. Currently, valid values are "", "none",
+ // "md5", "sha1", "sha256", "sha512" or "file". Since the validity of ISO
+ // and virtual disk files are typically crucial to a successful build,
+ // Packer performs a check of any supplied media by default. While setting
+ // "none" will cause Packer to skip this check, corruption of large files
+ // such as ISOs and virtual hard drives can occur from time to time. As
+ // such, skipping this check is not recommended. `iso_checksum_type` must
+	// be set to `file` when `iso_checksum` is a URL.
+ ISOChecksumType string `mapstructure:"iso_checksum_type"`
+ // A URL to the ISO containing the installation image or virtual hard drive
+ // (VHD or VHDX) file to clone.
+ RawSingleISOUrl string `mapstructure:"iso_url" required:"true"`
+ // Multiple URLs for the ISO to download. Packer will try these in order.
+ // If anything goes wrong attempting to download or while downloading a
+ // single URL, it will move on to the next. All URLs must point to the same
+ // file (same checksum). By default this is empty and `iso_url` is used.
+ // Only one of `iso_url` or `iso_urls` can be specified.
+ ISOUrls []string `mapstructure:"iso_urls"`
+ // The path where the iso should be saved after download. By default will
+ // go in the packer cache, with a hash of the original filename and
+ // checksum as its name.
+ TargetPath string `mapstructure:"iso_target_path"`
+ // The extension of the iso file after download. This defaults to `iso`.
+ TargetExtension string `mapstructure:"iso_target_extension"`
}
func (c *ISOConfig) Prepare(ctx *interpolate.Context) (warnings []string, errs []error) {
From dbfadc0eea3bc4a927674c7156f89585cf12cb06 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 16:02:49 +0200
Subject: [PATCH 41/97] generate struct markdown from common/iso_config.go
---
common/iso_config.go | 2 ++
.../common/_ISOConfig-not-required.html.md | 28 +++++++++++++++++++
.../common/_ISOConfig-required.html.md | 13 +++++++++
3 files changed, 43 insertions(+)
create mode 100644 website/source/partials/common/_ISOConfig-not-required.html.md
create mode 100644 website/source/partials/common/_ISOConfig-required.html.md
diff --git a/common/iso_config.go b/common/iso_config.go
index 435cfa3c7..4a0092d80 100644
--- a/common/iso_config.go
+++ b/common/iso_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
diff --git a/website/source/partials/common/_ISOConfig-not-required.html.md b/website/source/partials/common/_ISOConfig-not-required.html.md
new file mode 100644
index 000000000..633eef401
--- /dev/null
+++ b/website/source/partials/common/_ISOConfig-not-required.html.md
@@ -0,0 +1,28 @@
+
+
+- `iso_checksum_url` (string) - A URL to a checksum file containing a checksum for the ISO file. At
+ least one of `iso_checksum` and `iso_checksum_url` must be defined.
+ `iso_checksum_url` will be ignored if `iso_checksum` is non empty.
+
+- `iso_checksum_type` (string) - The algorithm to be used when computing the checksum of the file
+ specified in `iso_checksum`. Currently, valid values are "", "none",
+ "md5", "sha1", "sha256", "sha512" or "file". Since the validity of ISO
+ and virtual disk files are typically crucial to a successful build,
+ Packer performs a check of any supplied media by default. While setting
+ "none" will cause Packer to skip this check, corruption of large files
+ such as ISOs and virtual hard drives can occur from time to time. As
+ such, skipping this check is not recommended. `iso_checksum_type` must
+    be set to `file` when `iso_checksum` is a URL.
+
+- `iso_urls` ([]string) - Multiple URLs for the ISO to download. Packer will try these in order.
+ If anything goes wrong attempting to download or while downloading a
+ single URL, it will move on to the next. All URLs must point to the same
+ file (same checksum). By default this is empty and `iso_url` is used.
+ Only one of `iso_url` or `iso_urls` can be specified.
+
+- `iso_target_path` (string) - The path where the iso should be saved after download. By default will
+ go in the packer cache, with a hash of the original filename and
+ checksum as its name.
+
+- `iso_target_extension` (string) - The extension of the iso file after download. This defaults to `iso`.
+
\ No newline at end of file
diff --git a/website/source/partials/common/_ISOConfig-required.html.md b/website/source/partials/common/_ISOConfig-required.html.md
new file mode 100644
index 000000000..1508f6aa0
--- /dev/null
+++ b/website/source/partials/common/_ISOConfig-required.html.md
@@ -0,0 +1,13 @@
+
+
+- `iso_checksum` (string) - The checksum for the ISO file or virtual hard drive file. The algorithm
+ to use when computing the checksum can be optionally specified with
+ `iso_checksum_type`. When `iso_checksum_type` is not set packer will
+ guess the checksumming type based on `iso_checksum` length.
+    `iso_checksum` can also be a file or a URL, in which case
+ `iso_checksum_type` must be set to `file`; the go-getter will download
+ it and use the first hash found.
+
+- `iso_url` (string) - A URL to the ISO containing the installation image or virtual hard drive
+ (VHD or VHDX) file to clone.
+
\ No newline at end of file
From 349c3ef74b96099581568fa5ead013d43b586cbd Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 16:25:08 +0200
Subject: [PATCH 42/97] cmd/struct-markdown: also generate docs from header
comments of a struct
& generate it for ISOConfig
---
cmd/struct-markdown/main.go | 10 +++-
cmd/struct-markdown/template.go | 2 +
.../source/partials/common/_ISOConfig.html.md | 52 +++++++++++++++++++
3 files changed, 62 insertions(+), 2 deletions(-)
create mode 100644 website/source/partials/common/_ISOConfig.html.md
diff --git a/cmd/struct-markdown/main.go b/cmd/struct-markdown/main.go
index e5a31ff5d..545ed576f 100644
--- a/cmd/struct-markdown/main.go
+++ b/cmd/struct-markdown/main.go
@@ -60,6 +60,12 @@ func main() {
}
fields := structDecl.Fields.List
+ header := Struct{
+ SourcePath: paths[1],
+ Name: typeSpec.Name.Name,
+ Filename: "_" + typeSpec.Name.Name + ".html.md",
+ Header: typeDecl.Doc.Text(),
+ }
required := Struct{
SourcePath: paths[1],
Name: typeSpec.Name.Name,
@@ -115,8 +121,8 @@ func main() {
dir := filepath.Join(packerDir, "website", "source", "partials", builderName)
os.MkdirAll(dir, 0755)
- for _, str := range []Struct{required, notRequired} {
- if len(str.Fields) == 0 {
+ for _, str := range []Struct{header, required, notRequired} {
+ if len(str.Fields) == 0 && len(str.Header) == 0 {
continue
}
outputPath := filepath.Join(dir, str.Filename)
diff --git a/cmd/struct-markdown/template.go b/cmd/struct-markdown/template.go
index e4a3e1a87..1a6c3bdc9 100644
--- a/cmd/struct-markdown/template.go
+++ b/cmd/struct-markdown/template.go
@@ -15,6 +15,7 @@ type Struct struct {
SourcePath string
Name string
Filename string
+ Header string
Fields []Field
}
@@ -23,6 +24,7 @@ var structDocsTemplate = template.Must(template.New("structDocsTemplate").
"indent": indent,
}).
Parse(`
+{{ .Header -}}
{{range .Fields}}
- ` + "`" + `{{ .Name}}` + "`" + ` ({{ .Type }}) - {{ .Docs | indent 4 }}
{{- end -}}`))
diff --git a/website/source/partials/common/_ISOConfig.html.md b/website/source/partials/common/_ISOConfig.html.md
new file mode 100644
index 000000000..ab0097107
--- /dev/null
+++ b/website/source/partials/common/_ISOConfig.html.md
@@ -0,0 +1,52 @@
+
+By default, Packer will symlink, download or copy image files to the Packer
+cache into a "`hash($iso_url+$iso_checksum).$iso_target_extension`" file.
+Packer uses [hashicorp/go-getter](https://github.com/hashicorp/go-getter) in
+file mode in order to perform a download.
+
+go-getter supports the following protocols:
+
+* Local files
+* Git
+* Mercurial
+* HTTP
+* Amazon S3
+
+\~> On windows - when referencing a local iso - if packer is running
+without symlinking rights, the iso will be copied to the cache folder. Read
+[Symlinks in Windows 10
+!](https://blogs.windows.com/buildingapps/2016/12/02/symlinks-windows-10/)
+for more info.
+
+Examples:
+go-getter can guess the checksum type based on `iso_checksum` len.
+
+``` json
+{
+ "iso_checksum": "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2",
+ "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+}
+```
+
+``` json
+{
+ "iso_checksum_type": "file",
+ "iso_checksum": "ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum",
+ "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+}
+```
+
+``` json
+{
+ "iso_checksum_url": "./shasums.txt",
+ "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+}
+```
+
+``` json
+{
+ "iso_checksum_type": "sha256",
+ "iso_checksum_url": "./shasums.txt",
+ "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+}
+```
From 38b9bb1097a3479375751d74551245396ca0a3b1 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 16:32:24 +0200
Subject: [PATCH 43/97] use autogenerated templates for iso config docs
---
.../docs/builders/hyperv-iso.html.md.erb | 12 ++-
.../docs/builders/hyperv-vmcx.html.md.erb | 12 ++-
.../docs/builders/parallels-iso.html.md.erb | 12 ++-
.../docs/builders/virtualbox-iso.html.md.erb | 12 ++-
.../docs/builders/vmware-iso.html.md.erb | 12 ++-
.../partials/builders/_iso-config.html.md | 99 -------------------
6 files changed, 55 insertions(+), 104 deletions(-)
delete mode 100644 website/source/partials/builders/_iso-config.html.md
diff --git a/website/source/docs/builders/hyperv-iso.html.md.erb b/website/source/docs/builders/hyperv-iso.html.md.erb
index 72693ce7a..7b55e5063 100644
--- a/website/source/docs/builders/hyperv-iso.html.md.erb
+++ b/website/source/docs/builders/hyperv-iso.html.md.erb
@@ -48,7 +48,17 @@ leading to corruption of files or lost changes. As such, it is important to
add a `shutdown_command`. This tells Packer how to safely shutdown and
power off the VM.
-<%= partial "partials/builders/iso-config" %>
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
+### Required:
+
+<%= partial "partials/common/ISOConfig-required" %>
+
+### Optional:
+
+<%= partial "partials/common/ISOConfig-not-required" %>
## Configuration Reference
diff --git a/website/source/docs/builders/hyperv-vmcx.html.md.erb b/website/source/docs/builders/hyperv-vmcx.html.md.erb
index 34121076c..760801a00 100644
--- a/website/source/docs/builders/hyperv-vmcx.html.md.erb
+++ b/website/source/docs/builders/hyperv-vmcx.html.md.erb
@@ -69,7 +69,17 @@ In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.
-<%= partial "partials/builders/iso-config" %>
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
+### Required:
+
+<%= partial "partials/common/ISOConfig-required" %>
+
+### Optional:
+
+<%= partial "partials/common/ISOConfig-not-required" %>
### Required for virtual machine import:
diff --git a/website/source/docs/builders/parallels-iso.html.md.erb b/website/source/docs/builders/parallels-iso.html.md.erb
index 5f7e69f1c..8a8d66745 100644
--- a/website/source/docs/builders/parallels-iso.html.md.erb
+++ b/website/source/docs/builders/parallels-iso.html.md.erb
@@ -57,7 +57,17 @@ In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.
-<%= partial "partials/builders/iso-config" %>
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
+### Required:
+
+<%= partial "partials/common/ISOConfig-required" %>
+
+### Optional:
+
+<%= partial "partials/common/ISOConfig-not-required" %>
### Required:
diff --git a/website/source/docs/builders/virtualbox-iso.html.md.erb b/website/source/docs/builders/virtualbox-iso.html.md.erb
index 5b43b5aab..251e87e41 100644
--- a/website/source/docs/builders/virtualbox-iso.html.md.erb
+++ b/website/source/docs/builders/virtualbox-iso.html.md.erb
@@ -45,7 +45,17 @@ It is important to add a `shutdown_command`. By default Packer halts the virtual
machine and the file system may not be sync'd. Thus, changes made in a
provisioner might not be saved.
-<%= partial "partials/builders/iso-config" %>
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
+### Required:
+
+<%= partial "partials/common/ISOConfig-required" %>
+
+### Optional:
+
+<%= partial "partials/common/ISOConfig-not-required" %>
## Configuration Reference
diff --git a/website/source/docs/builders/vmware-iso.html.md.erb b/website/source/docs/builders/vmware-iso.html.md.erb
index feaa919fd..da23b8379 100644
--- a/website/source/docs/builders/vmware-iso.html.md.erb
+++ b/website/source/docs/builders/vmware-iso.html.md.erb
@@ -49,7 +49,17 @@ self-install. Still, the example serves to show the basic configuration:
}
```
-<%= partial "partials/builders/iso-config" %>
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
+### Required:
+
+<%= partial "partials/common/ISOConfig-required" %>
+
+### Optional:
+
+<%= partial "partials/common/ISOConfig-not-required" %>
## Configuration Reference
diff --git a/website/source/partials/builders/_iso-config.html.md b/website/source/partials/builders/_iso-config.html.md
deleted file mode 100644
index bea94b688..000000000
--- a/website/source/partials/builders/_iso-config.html.md
+++ /dev/null
@@ -1,99 +0,0 @@
-## ISO Configuration Reference
-
-By default, Packer will symlink, download or copy image files to the Packer
-cache into a "`hash($iso_url+$iso_checksum).$iso_target_extension`" file.
-Packer uses [hashicorp/go-getter](https://github.com/hashicorp/go-getter) in
-file mode in order to perform a download.
-
-go-getter supports the following protocols:
-
-* Local files
-* Git
-* Mercurial
-* HTTP
-* Amazon S3
-
-
-\~> On windows - when referencing a local iso - if packer is running without
-symlinking rights, the iso will be copied to the cache folder. Read [Symlinks
-in Windows 10
-!](https://blogs.windows.com/buildingapps/2016/12/02/symlinks-windows-10/) for
-more info.
-
-### Required:
-
-- `iso_checksum` (string) - The checksum for the ISO file or virtual hard
- drive file. The algorithm to use when computing the checksum can be
- optionally specified with `iso_checksum_type`. When `iso_checksum_type` is
- not set packer will guess the checksumming type based on `iso_checksum`
- length. `iso_checksum` can be also be a file or an URL, in which case
- `iso_checksum_type` must be set to `file`; the go-getter will download it
- and use the first hash found.
-
-- `iso_url` (string) - A URL to the ISO containing the installation image or
- virtual hard drive (VHD or VHDX) file to clone.
-
-### Optional:
-
-- `iso_checksum_type` (string) - The algorithm to be used when computing the
- checksum of the file specified in `iso_checksum`. Currently, valid values
- are "", "none", "md5", "sha1", "sha256", "sha512" or "file". Since the
- validity of ISO and virtual disk files are typically crucial to a
- successful build, Packer performs a check of any supplied media by default.
- While setting "none" will cause Packer to skip this check, corruption of
- large files such as ISOs and virtual hard drives can occur from time to
- time. As such, skipping this check is not recommended. `iso_checksum_type`
- must be set to `file` when `iso_checksum` is an url.
-
-- `iso_checksum_url` (string) - A URL to a checksum file containing a
- checksum for the ISO file. At least one of `iso_checksum` and
- `iso_checksum_url` must be defined. `iso_checksum_url` will be ignored if
- `iso_checksum` is non empty.
-
-- `iso_target_extension` (string) - The extension of the iso file after
- download. This defaults to `iso`.
-
-- `iso_target_path` (string) - The path where the iso should be saved after
- download. By default will go in the packer cache, with a hash of the
- original filename and checksum as its name.
-
-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
- Packer will try these in order. If anything goes wrong attempting to
- download or while downloading a single URL, it will move on to the next.
- All URLs must point to the same file (same checksum). By default this is
- empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be
- specified.
-
-### Example ISO configurations
-
-go-getter can guess the checksum type based on `iso_checksum` len.
-
-``` json
-{
- "iso_checksum": "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2",
- "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
-}
-```
-
-``` json
-{
- "iso_checksum_type": "file",
- "iso_checksum": "ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum",
- "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
-}
-```
-
-``` json
-{
- "iso_checksum_url": "./shasums.txt",
- "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
-}
-```
-
-``` json
-{
- "iso_checksum_type": "sha256",
- "iso_checksum_url": "./shasums.txt",
- "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
-}
-```
From f4d5842b7266963061a8c478b75833f87f37262e Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 16:33:18 +0200
Subject: [PATCH 44/97] generate all header docs
---
.../builder/alicloud/ecs/_AlicloudAccessConfig.html.md | 2 ++
website/source/partials/builder/amazon/chroot/_Config.html.md | 3 +++
.../source/partials/builder/amazon/common/_AMIConfig.html.md | 2 ++
.../partials/builder/amazon/common/_AccessConfig.html.md | 2 ++
.../partials/builder/amazon/common/_BlockDevice.html.md | 2 ++
.../source/partials/builder/amazon/common/_RunConfig.html.md | 3 +++
.../source/partials/builder/amazon/instance/_Config.html.md | 3 +++
.../source/partials/builder/azure/arm/_ClientConfig.html.md | 2 ++
website/source/partials/builder/cloudstack/_Config.html.md | 2 ++
website/source/partials/builder/googlecompute/_Config.html.md | 4 ++++
website/source/partials/builder/hyperv/iso/_Builder.html.md | 3 +++
website/source/partials/builder/hyperv/vmcx/_Builder.html.md | 3 +++
website/source/partials/builder/ncloud/_Config.html.md | 2 ++
.../source/partials/builder/openstack/_AccessConfig.html.md | 2 ++
.../source/partials/builder/openstack/_ImageConfig.html.md | 2 ++
website/source/partials/builder/openstack/_RunConfig.html.md | 3 +++
.../partials/builder/parallels/common/_OutputConfig.html.md | 2 ++
.../partials/builder/parallels/common/_PrlctlConfig.html.md | 3 +++
.../builder/parallels/common/_PrlctlPostConfig.html.md | 3 +++
.../builder/parallels/common/_PrlctlVersionConfig.html.md | 2 ++
.../partials/builder/parallels/common/_ShutdownConfig.html.md | 2 ++
.../partials/builder/parallels/common/_ToolsConfig.html.md | 2 ++
website/source/partials/builder/parallels/pvm/_Config.html.md | 2 ++
website/source/partials/builder/triton/_AccessConfig.html.md | 2 ++
.../partials/builder/triton/_SourceMachineConfig.html.md | 3 +++
.../source/partials/builder/triton/_TargetImageConfig.html.md | 3 +++
website/source/partials/builder/vagrant/_Builder.html.md | 3 +++
.../source/partials/builder/virtualbox/ovf/_Config.html.md | 2 ++
.../partials/builder/vmware/common/_ParallelUnion.html.md | 2 ++
.../partials/builder/vmware/common/_SerialConfigPipe.html.md | 2 ++
website/source/partials/builder/vmware/vmx/_Config.html.md | 2 ++
website/source/partials/helper/communicator/_Config.html.md | 3 +++
32 files changed, 78 insertions(+)
create mode 100644 website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig.html.md
create mode 100644 website/source/partials/builder/amazon/chroot/_Config.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AMIConfig.html.md
create mode 100644 website/source/partials/builder/amazon/common/_AccessConfig.html.md
create mode 100644 website/source/partials/builder/amazon/common/_BlockDevice.html.md
create mode 100644 website/source/partials/builder/amazon/common/_RunConfig.html.md
create mode 100644 website/source/partials/builder/amazon/instance/_Config.html.md
create mode 100644 website/source/partials/builder/azure/arm/_ClientConfig.html.md
create mode 100644 website/source/partials/builder/cloudstack/_Config.html.md
create mode 100644 website/source/partials/builder/googlecompute/_Config.html.md
create mode 100644 website/source/partials/builder/hyperv/iso/_Builder.html.md
create mode 100644 website/source/partials/builder/hyperv/vmcx/_Builder.html.md
create mode 100644 website/source/partials/builder/ncloud/_Config.html.md
create mode 100644 website/source/partials/builder/openstack/_AccessConfig.html.md
create mode 100644 website/source/partials/builder/openstack/_ImageConfig.html.md
create mode 100644 website/source/partials/builder/openstack/_RunConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_OutputConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlPostConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_PrlctlVersionConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_ShutdownConfig.html.md
create mode 100644 website/source/partials/builder/parallels/common/_ToolsConfig.html.md
create mode 100644 website/source/partials/builder/parallels/pvm/_Config.html.md
create mode 100644 website/source/partials/builder/triton/_AccessConfig.html.md
create mode 100644 website/source/partials/builder/triton/_SourceMachineConfig.html.md
create mode 100644 website/source/partials/builder/triton/_TargetImageConfig.html.md
create mode 100644 website/source/partials/builder/vagrant/_Builder.html.md
create mode 100644 website/source/partials/builder/virtualbox/ovf/_Config.html.md
create mode 100644 website/source/partials/builder/vmware/common/_ParallelUnion.html.md
create mode 100644 website/source/partials/builder/vmware/common/_SerialConfigPipe.html.md
create mode 100644 website/source/partials/builder/vmware/vmx/_Config.html.md
create mode 100644 website/source/partials/helper/communicator/_Config.html.md
diff --git a/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig.html.md b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig.html.md
new file mode 100644
index 000000000..5898c5f0e
--- /dev/null
+++ b/website/source/partials/builder/alicloud/ecs/_AlicloudAccessConfig.html.md
@@ -0,0 +1,2 @@
+
+Config of alicloud
diff --git a/website/source/partials/builder/amazon/chroot/_Config.html.md b/website/source/partials/builder/amazon/chroot/_Config.html.md
new file mode 100644
index 000000000..7831c185a
--- /dev/null
+++ b/website/source/partials/builder/amazon/chroot/_Config.html.md
@@ -0,0 +1,3 @@
+
+Config is the configuration that is chained through the steps and
+settable from the template.
diff --git a/website/source/partials/builder/amazon/common/_AMIConfig.html.md b/website/source/partials/builder/amazon/common/_AMIConfig.html.md
new file mode 100644
index 000000000..55af4c4cf
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AMIConfig.html.md
@@ -0,0 +1,2 @@
+
+AMIConfig is for common configuration related to creating AMIs.
diff --git a/website/source/partials/builder/amazon/common/_AccessConfig.html.md b/website/source/partials/builder/amazon/common/_AccessConfig.html.md
new file mode 100644
index 000000000..092224e9c
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_AccessConfig.html.md
@@ -0,0 +1,2 @@
+
+AccessConfig is for common configuration related to AWS access
diff --git a/website/source/partials/builder/amazon/common/_BlockDevice.html.md b/website/source/partials/builder/amazon/common/_BlockDevice.html.md
new file mode 100644
index 000000000..3d831aa0d
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_BlockDevice.html.md
@@ -0,0 +1,2 @@
+
+BlockDevice
diff --git a/website/source/partials/builder/amazon/common/_RunConfig.html.md b/website/source/partials/builder/amazon/common/_RunConfig.html.md
new file mode 100644
index 000000000..c67a8fb25
--- /dev/null
+++ b/website/source/partials/builder/amazon/common/_RunConfig.html.md
@@ -0,0 +1,3 @@
+
+RunConfig contains configuration for running an instance from a source
+AMI and details on how to access that launched image.
diff --git a/website/source/partials/builder/amazon/instance/_Config.html.md b/website/source/partials/builder/amazon/instance/_Config.html.md
new file mode 100644
index 000000000..dd749cca0
--- /dev/null
+++ b/website/source/partials/builder/amazon/instance/_Config.html.md
@@ -0,0 +1,3 @@
+
+Config is the configuration that is chained through the steps and
+settable from the template.
diff --git a/website/source/partials/builder/azure/arm/_ClientConfig.html.md b/website/source/partials/builder/azure/arm/_ClientConfig.html.md
new file mode 100644
index 000000000..89ed1f25d
--- /dev/null
+++ b/website/source/partials/builder/azure/arm/_ClientConfig.html.md
@@ -0,0 +1,2 @@
+
+ClientConfig allows for various ways to authenticate Azure clients
diff --git a/website/source/partials/builder/cloudstack/_Config.html.md b/website/source/partials/builder/cloudstack/_Config.html.md
new file mode 100644
index 000000000..0b401cd22
--- /dev/null
+++ b/website/source/partials/builder/cloudstack/_Config.html.md
@@ -0,0 +1,2 @@
+
+Config holds all the details needed to configure the builder.
diff --git a/website/source/partials/builder/googlecompute/_Config.html.md b/website/source/partials/builder/googlecompute/_Config.html.md
new file mode 100644
index 000000000..63b5232d7
--- /dev/null
+++ b/website/source/partials/builder/googlecompute/_Config.html.md
@@ -0,0 +1,4 @@
+
+Config is the configuration structure for the GCE builder. It stores
+both the publicly settable state as well as the privately generated
+state of the config object.
diff --git a/website/source/partials/builder/hyperv/iso/_Builder.html.md b/website/source/partials/builder/hyperv/iso/_Builder.html.md
new file mode 100644
index 000000000..ffb9a0873
--- /dev/null
+++ b/website/source/partials/builder/hyperv/iso/_Builder.html.md
@@ -0,0 +1,3 @@
+
+Builder implements packer.Builder and builds the actual Hyperv
+images.
diff --git a/website/source/partials/builder/hyperv/vmcx/_Builder.html.md b/website/source/partials/builder/hyperv/vmcx/_Builder.html.md
new file mode 100644
index 000000000..5e0c3c265
--- /dev/null
+++ b/website/source/partials/builder/hyperv/vmcx/_Builder.html.md
@@ -0,0 +1,3 @@
+
+Builder implements packer.Builder and builds the actual Hyperv
+images.
diff --git a/website/source/partials/builder/ncloud/_Config.html.md b/website/source/partials/builder/ncloud/_Config.html.md
new file mode 100644
index 000000000..7a02a6c27
--- /dev/null
+++ b/website/source/partials/builder/ncloud/_Config.html.md
@@ -0,0 +1,2 @@
+
+Config is the configuration structure for the packer builder plugin for Naver Cloud Platform
diff --git a/website/source/partials/builder/openstack/_AccessConfig.html.md b/website/source/partials/builder/openstack/_AccessConfig.html.md
new file mode 100644
index 000000000..f92fbd550
--- /dev/null
+++ b/website/source/partials/builder/openstack/_AccessConfig.html.md
@@ -0,0 +1,2 @@
+
+AccessConfig is for common configuration related to openstack access
diff --git a/website/source/partials/builder/openstack/_ImageConfig.html.md b/website/source/partials/builder/openstack/_ImageConfig.html.md
new file mode 100644
index 000000000..45f731377
--- /dev/null
+++ b/website/source/partials/builder/openstack/_ImageConfig.html.md
@@ -0,0 +1,2 @@
+
+ImageConfig is for common configuration related to creating Images.
diff --git a/website/source/partials/builder/openstack/_RunConfig.html.md b/website/source/partials/builder/openstack/_RunConfig.html.md
new file mode 100644
index 000000000..a6c9c023a
--- /dev/null
+++ b/website/source/partials/builder/openstack/_RunConfig.html.md
@@ -0,0 +1,3 @@
+
+RunConfig contains configuration for running an instance from a source
+image and details on how to access that launched image.
diff --git a/website/source/partials/builder/parallels/common/_OutputConfig.html.md b/website/source/partials/builder/parallels/common/_OutputConfig.html.md
new file mode 100644
index 000000000..8fdd9cb7f
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_OutputConfig.html.md
@@ -0,0 +1,2 @@
+
+OutputConfig contains the configuration for builder's output.
diff --git a/website/source/partials/builder/parallels/common/_PrlctlConfig.html.md b/website/source/partials/builder/parallels/common/_PrlctlConfig.html.md
new file mode 100644
index 000000000..518286eab
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlConfig.html.md
@@ -0,0 +1,3 @@
+
+PrlctlConfig contains the configuration for running "prlctl" commands
+before the VM start.
diff --git a/website/source/partials/builder/parallels/common/_PrlctlPostConfig.html.md b/website/source/partials/builder/parallels/common/_PrlctlPostConfig.html.md
new file mode 100644
index 000000000..35cc46e66
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlPostConfig.html.md
@@ -0,0 +1,3 @@
+
+PrlctlPostConfig contains the configuration for running "prlctl" commands
+in the end of artifact build.
diff --git a/website/source/partials/builder/parallels/common/_PrlctlVersionConfig.html.md b/website/source/partials/builder/parallels/common/_PrlctlVersionConfig.html.md
new file mode 100644
index 000000000..3dfa090c1
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_PrlctlVersionConfig.html.md
@@ -0,0 +1,2 @@
+
+PrlctlVersionConfig contains the configuration for `prlctl` version.
diff --git a/website/source/partials/builder/parallels/common/_ShutdownConfig.html.md b/website/source/partials/builder/parallels/common/_ShutdownConfig.html.md
new file mode 100644
index 000000000..4401e453e
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_ShutdownConfig.html.md
@@ -0,0 +1,2 @@
+
+ShutdownConfig contains the configuration for VM shutdown.
diff --git a/website/source/partials/builder/parallels/common/_ToolsConfig.html.md b/website/source/partials/builder/parallels/common/_ToolsConfig.html.md
new file mode 100644
index 000000000..76c3773ba
--- /dev/null
+++ b/website/source/partials/builder/parallels/common/_ToolsConfig.html.md
@@ -0,0 +1,2 @@
+
+ToolsConfig contains the builder configuration related to Parallels Tools.
diff --git a/website/source/partials/builder/parallels/pvm/_Config.html.md b/website/source/partials/builder/parallels/pvm/_Config.html.md
new file mode 100644
index 000000000..31b4e2f09
--- /dev/null
+++ b/website/source/partials/builder/parallels/pvm/_Config.html.md
@@ -0,0 +1,2 @@
+
+Config is the configuration structure for the builder.
diff --git a/website/source/partials/builder/triton/_AccessConfig.html.md b/website/source/partials/builder/triton/_AccessConfig.html.md
new file mode 100644
index 000000000..2a1f4f9ce
--- /dev/null
+++ b/website/source/partials/builder/triton/_AccessConfig.html.md
@@ -0,0 +1,2 @@
+
+AccessConfig is for common configuration related to Triton access
diff --git a/website/source/partials/builder/triton/_SourceMachineConfig.html.md b/website/source/partials/builder/triton/_SourceMachineConfig.html.md
new file mode 100644
index 000000000..c74dc9012
--- /dev/null
+++ b/website/source/partials/builder/triton/_SourceMachineConfig.html.md
@@ -0,0 +1,3 @@
+
+SourceMachineConfig represents the configuration to run a machine using
+the SDC API in order for provisioning to take place.
diff --git a/website/source/partials/builder/triton/_TargetImageConfig.html.md b/website/source/partials/builder/triton/_TargetImageConfig.html.md
new file mode 100644
index 000000000..d940ce624
--- /dev/null
+++ b/website/source/partials/builder/triton/_TargetImageConfig.html.md
@@ -0,0 +1,3 @@
+
+TargetImageConfig represents the configuration for the image to be created
+from the source machine.
diff --git a/website/source/partials/builder/vagrant/_Builder.html.md b/website/source/partials/builder/vagrant/_Builder.html.md
new file mode 100644
index 000000000..9db960859
--- /dev/null
+++ b/website/source/partials/builder/vagrant/_Builder.html.md
@@ -0,0 +1,3 @@
+
+Builder implements packer.Builder and builds the actual Vagrant
+images.
diff --git a/website/source/partials/builder/virtualbox/ovf/_Config.html.md b/website/source/partials/builder/virtualbox/ovf/_Config.html.md
new file mode 100644
index 000000000..55488ff4b
--- /dev/null
+++ b/website/source/partials/builder/virtualbox/ovf/_Config.html.md
@@ -0,0 +1,2 @@
+
+Config is the configuration structure for the builder.
diff --git a/website/source/partials/builder/vmware/common/_ParallelUnion.html.md b/website/source/partials/builder/vmware/common/_ParallelUnion.html.md
new file mode 100644
index 000000000..fc2e092f6
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_ParallelUnion.html.md
@@ -0,0 +1,2 @@
+
+ parallel port
diff --git a/website/source/partials/builder/vmware/common/_SerialConfigPipe.html.md b/website/source/partials/builder/vmware/common/_SerialConfigPipe.html.md
new file mode 100644
index 000000000..067351033
--- /dev/null
+++ b/website/source/partials/builder/vmware/common/_SerialConfigPipe.html.md
@@ -0,0 +1,2 @@
+
+ serial conversions
diff --git a/website/source/partials/builder/vmware/vmx/_Config.html.md b/website/source/partials/builder/vmware/vmx/_Config.html.md
new file mode 100644
index 000000000..a25ad5386
--- /dev/null
+++ b/website/source/partials/builder/vmware/vmx/_Config.html.md
@@ -0,0 +1,2 @@
+
+Config is the configuration structure for the builder.
diff --git a/website/source/partials/helper/communicator/_Config.html.md b/website/source/partials/helper/communicator/_Config.html.md
new file mode 100644
index 000000000..2a07eba25
--- /dev/null
+++ b/website/source/partials/helper/communicator/_Config.html.md
@@ -0,0 +1,3 @@
+
+Config is the common configuration that communicators allow within
+a builder.
From 76f50619b1981246a8c04a74e2ab69ed54214f6b Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Wed, 12 Jun 2019 18:38:37 +0200
Subject: [PATCH 45/97] builder/qemu/builder.go: Document from Config file
---
builder/qemu/builder.go | 146 +++++++---
website/source/docs/builders/qemu.html.md.erb | 258 +-----------------
.../builder/qemu/_Config-not-required.html.md | 51 ++--
3 files changed, 149 insertions(+), 306 deletions(-)
diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index 7473b071d..b36469f31 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -98,29 +98,49 @@ type Config struct {
Comm communicator.Config `mapstructure:",squash"`
common.FloppyConfig `mapstructure:",squash"`
// Use iso from provided url. Qemu must support
- // curl block device. This defaults to false.
+ // curl block device. This defaults to `false`.
ISOSkipCache bool `mapstructure:"iso_skip_cache" required:"false"`
// The accelerator type to use when running the VM.
- // This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
+ // This may be `none`, `kvm`, `tcg`, `hax`, `hvf`, `whpx`, or `xen`. The appropriate
// software must have already been installed on your build machine to use the
// accelerator you specified. When no accelerator is specified, Packer will try
- // to use kvm if it is available but will default to tcg otherwise.
+ // to use `kvm` if it is available but will default to `tcg` otherwise.
+ //
+ // -> The `hax` accelerator has issues attaching CDROM ISOs. This is an
+ // upstream issue which can be tracked
+ // [here](https://github.com/intel/haxm/issues/20).
+ //
+ // -> The `hvf` and `whpx` accelerator are new and experimental as of
+ // [QEMU 2.12.0](https://wiki.qemu.org/ChangeLog/2.12#Host_support).
+ // You may encounter issues unrelated to Packer when using these. You may need to
+ // add [ "-global", "virtio-pci.disable-modern=on" ] to `qemuargs` depending on the
+ // guest operating system.
+ //
+ // -> For `whpx`, note that [Stefan Weil's QEMU for Windows distribution](https://qemu.weilnetz.de/w64/)
+ // does not include WHPX support and users may need to compile or source a
+ // build of QEMU for Windows themselves with WHPX support.
Accelerator string `mapstructure:"accelerator" required:"false"`
// The number of cpus to use when building the VM.
- // The default is 1 CPU.
+ // The default is `1` CPU.
CpuCount int `mapstructure:"cpus" required:"false"`
- // The interface to use for the disk. Allowed
- // values include any of ide, scsi, virtio or virtio-scsi*. Note
- // also that any boot commands or kickstart type scripts must have proper
- // adjustments for resulting device names. The Qemu builder uses virtio by
- // default.
+ // The interface to use for the disk. Allowed values include any of `ide`,
+ // `scsi`, `virtio` or `virtio-scsi`^\*. Note also that any boot commands
+ // or kickstart type scripts must have proper adjustments for resulting
+ // device names. The Qemu builder uses `virtio` by default.
+ //
+ // ^\* Please be aware that use of the `scsi` disk interface has been
+ // disabled by Red Hat due to a bug described
+ // [here](https://bugzilla.redhat.com/show_bug.cgi?id=1019220). If you are
+ // running Qemu on RHEL or a RHEL variant such as CentOS, you *must* choose
+ // one of the other listed interfaces. Using the `scsi` interface under
+ // these circumstances will cause the build to fail.
DiskInterface string `mapstructure:"disk_interface" required:"false"`
// The size, in megabytes, of the hard disk to create
// for the VM. By default, this is 40960 (40 GB).
DiskSize uint `mapstructure:"disk_size" required:"false"`
- // The cache mode to use for disk. Allowed values
- // include any of writethrough, writeback, none, unsafe
- // or directsync. By default, this is set to writeback.
+ // The cache mode to use for disk. Allowed values include any of
+ // `writethrough`, `writeback`, `none`, `unsafe` or `directsync`. By
+ // default, this is set to `writeback`.
DiskCache string `mapstructure:"disk_cache" required:"false"`
// The discard mode to use for disk. Allowed values
// include any of unmap or ignore. By default, this is set to ignore.
@@ -137,18 +157,21 @@ type Config struct {
// Apply compression to the QCOW2 disk file
// using qemu-img convert. Defaults to false.
DiskCompression bool `mapstructure:"disk_compression" required:"false"`
- // Either qcow2 or raw, this specifies the output
- // format of the virtual machine image. This defaults to qcow2.
+ // Either `qcow2` or `raw`, this specifies the output format of the virtual
+ // machine image. This defaults to `qcow2`.
Format string `mapstructure:"format" required:"false"`
// Packer defaults to building QEMU virtual machines by
// launching a GUI that shows the console of the machine being built. When this
- // value is set to true, the machine will start without a console.
+ // value is set to `true`, the machine will start without a console.
+ //
+ // You can still see the console if you make a note of the VNC display
+	// number chosen, and then connect using `vncviewer -Shared <host>:<display>`
Headless bool `mapstructure:"headless" required:"false"`
- // Packer defaults to building from an ISO file, this
- // parameter controls whether the ISO URL supplied is actually a bootable
- // QEMU image. When this value is set to true, the machine will either clone
- // the source or use it as a backing file (if use_backing_file is true);
- // then, it will resize the image according to disk_size and boot it.
+ // Packer defaults to building from an ISO file, this parameter controls
+ // whether the ISO URL supplied is actually a bootable QEMU image. When
+ // this value is set to `true`, the machine will either clone the source or
+ // use it as a backing file (if `use_backing_file` is `true`); then, it
+ // will resize the image according to `disk_size` and boot it.
DiskImage bool `mapstructure:"disk_image" required:"false"`
// Only applicable when disk_image is true
// and format is qcow2, set this option to true to create a new QCOW2
@@ -156,18 +179,18 @@ type Config struct {
// will only contain blocks that have changed compared to the backing file, so
// enabling this option can significantly reduce disk usage.
UseBackingFile bool `mapstructure:"use_backing_file" required:"false"`
- // The type of machine emulation to use. Run your
- // qemu binary with the flags -machine help to list available types for
- // your system. This defaults to pc.
+ // The type of machine emulation to use. Run your qemu binary with the
+ // flags `-machine help` to list available types for your system. This
+ // defaults to `pc`.
MachineType string `mapstructure:"machine_type" required:"false"`
// The amount of memory to use when building the VM
// in megabytes. This defaults to 512 megabytes.
MemorySize int `mapstructure:"memory" required:"false"`
- // The driver to use for the network interface. Allowed
- // values ne2k_pci, i82551, i82557b, i82559er, rtl8139, e1000,
- // pcnet, virtio, virtio-net, virtio-net-pci, usb-net, i82559a,
- // i82559b, i82559c, i82550, i82562, i82557a, i82557c, i82801,
- // vmxnet3, i82558a or i82558b. The Qemu builder uses virtio-net by
+ // The driver to use for the network interface. Allowed values `ne2k_pci`,
+ // `i82551`, `i82557b`, `i82559er`, `rtl8139`, `e1000`, `pcnet`, `virtio`,
+ // `virtio-net`, `virtio-net-pci`, `usb-net`, `i82559a`, `i82559b`,
+ // `i82559c`, `i82550`, `i82562`, `i82557a`, `i82557c`, `i82801`,
+ // `vmxnet3`, `i82558a` or `i82558b`. The Qemu builder uses `virtio-net` by
// default.
NetDevice string `mapstructure:"net_device" required:"false"`
// This is the path to the directory where the
@@ -177,11 +200,68 @@ type Config struct {
// the builder. By default this is output-BUILDNAME where "BUILDNAME" is the
// name of the build.
OutputDir string `mapstructure:"output_directory" required:"false"`
- // Allows complete control over the
- // qemu command line (though not, at this time, qemu-img). Each array of
- // strings makes up a command line switch that overrides matching default
- // switch/value pairs. Any value specified as an empty string is ignored. All
- // values after the switch are concatenated with no separator.
+ // Allows complete control over the qemu command line (though not, at this
+ // time, qemu-img). Each array of strings makes up a command line switch
+ // that overrides matching default switch/value pairs. Any value specified
+ // as an empty string is ignored. All values after the switch are
+ // concatenated with no separator.
+ //
+ // ~> **Warning:** The qemu command line allows extreme flexibility, so
+ // beware of conflicting arguments causing failures of your run. For
+ // instance, using --no-acpi could break the ability to send power signal
+ // type commands (e.g., shutdown -P now) to the virtual machine, thus
+ // preventing proper shutdown. To see the defaults, look in the packer.log
+ // file and search for the qemu-system-x86 command. The arguments are all
+ // printed for review.
+ //
+ // The following shows a sample usage:
+ //
+ // ``` json {
+ // "qemuargs": [
+ // [ "-m", "1024M" ],
+ // [ "--no-acpi", "" ],
+ // [
+ // "-netdev",
+ // "user,id=mynet0,",
+ // "hostfwd=hostip:hostport-guestip:guestport",
+ // ""
+ // ],
+ // [ "-device", "virtio-net,netdev=mynet0" ]
+ // ]
+ // } ```
+ //
+ // would produce the following (not including other defaults supplied by
+ // the builder and not otherwise conflicting with the qemuargs):
+ //
+ // ``` text qemu-system-x86 -m 1024m --no-acpi -netdev
+ // user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device
+ // virtio-net,netdev=mynet0" ```
+ //
+ // ~> **Windows Users:** [QEMU for Windows](https://qemu.weilnetz.de/)
+	// builds are available though an environment variable does need to be
+ // set for QEMU for Windows to redirect stdout to the console instead of
+ // stdout.txt.
+ //
+ // The following shows the environment variable that needs to be set for
+ // Windows QEMU support:
+ //
+ // ``` text setx SDL_STDIO_REDIRECT=0 ```
+ //
+ // You can also use the `SSHHostPort` template variable to produce a packer
+ // template that can be invoked by `make` in parallel:
+ //
+ // ``` json {
+ // "qemuargs": [
+ // [ "-netdev", "user,hostfwd=tcp::{{ .SSHHostPort }}-:22,id=forward"],
+ // [ "-device", "virtio-net,netdev=forward,id=net0"]
+ // ]
+ // } ```
+ //
+ // `make -j 3 my-awesome-packer-templates` spawns 3 packer processes, each
+ // of which will bind to their own SSH port as determined by each process.
+ // This will also work with WinRM, just change the port forward in
+ // `qemuargs` to map to WinRM's default port of `5985` or whatever value
+ // you have the service set to listen on.
QemuArgs [][]string `mapstructure:"qemuargs" required:"false"`
// The name of the Qemu binary to look for. This
// defaults to qemu-system-x86_64, but may need to be changed for
diff --git a/website/source/docs/builders/qemu.html.md.erb b/website/source/docs/builders/qemu.html.md.erb
index c139740bc..0dc543f1a 100644
--- a/website/source/docs/builders/qemu.html.md.erb
+++ b/website/source/docs/builders/qemu.html.md.erb
@@ -77,54 +77,24 @@ Note that you will need to set `"headless": true` if you are running Packer
on a Linux server without X11; or if you are connected via ssh to a remote
Linux server and have not enabled X11 forwarding (`ssh -X`).
+
+## ISO Configuration Reference
+
+<%= partial "partials/common/ISOConfig" %>
+
### Required:
-- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO
- files are so large, this is required and Packer will verify it prior to
- booting a virtual machine with the ISO attached. The type of the checksum is
- specified with `iso_checksum_type`, documented below. At least one of
- `iso_checksum` and `iso_checksum_url` must be defined. This has precedence
- over `iso_checksum_url` type.
-
-- `iso_checksum_type` (string) - The type of the checksum specified in
- `iso_checksum`. Valid values are `none`, `md5`, `sha1`, `sha256`, or
- `sha512` currently. While `none` will skip checksumming, this is not
- recommended since ISO files are generally large and corruption does happen
- from time to time.
-
-- `iso_checksum_url` (string) - A URL to a GNU or BSD style checksum file
- containing a checksum for the OS ISO file. At least one of `iso_checksum`
- and `iso_checksum_url` must be defined. This will be ignored if
- `iso_checksum` is non empty.
-
-- `iso_url` (string) - A URL to the ISO containing the installation image.
- This URL can be either an HTTP URL or a file URL (or path to a file). If
- this is an HTTP URL, Packer will download it and cache it between runs.
- This can also be a URL to an IMG or QCOW2 file, in which case QEMU will
- boot directly from it. When passing a path to an IMG or QCOW2 file, you
- should set `disk_image` to `true`.
+<%= partial "partials/common/ISOConfig-required" %>
### Optional:
-- `accelerator` (string) - The accelerator type to use when running the VM.
- This may be `none`, `kvm`, `tcg`, `hax`, `hvf`, `whpx`, or `xen`. The appropriate
- software must have already been installed on your build machine to use the
- accelerator you specified. When no accelerator is specified, Packer will try
- to use `kvm` if it is available but will default to `tcg` otherwise.
+<%= partial "partials/common/ISOConfig-not-required" %>
- -> The `hax` accelerator has issues attaching CDROM ISOs. This is an
- upstream issue which can be tracked
- [here](https://github.com/intel/haxm/issues/20).
+## Specific Configuration Reference
- -> The `hvf` and `whpx` accelerator are new and experimental as of
- [QEMU 2.12.0](https://wiki.qemu.org/ChangeLog/2.12#Host_support).
- You may encounter issues unrelated to Packer when using these. You may need to
- add [ "-global", "virtio-pci.disable-modern=on" ] to `qemuargs` depending on the
- guest operating system.
-
- -> For `whpx`, note that [Stefan Weil's QEMU for Windows distribution](https://qemu.weilnetz.de/w64/)
- does not include WHPX support and users may need to compile or source a
- build of QEMU for Windows themselves with WHPX support.
+### Optional:
+<%= partial "partials/builder/qemu/Config-not-required" %>
+DADA
- `boot_command` (array of strings) - This is an array of commands to type
when the virtual machine is first booted. The goal of these commands should
@@ -139,46 +109,6 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
five seconds and one minute 30 seconds, respectively. If this isn't
specified, the default is `10s` or 10 seconds.
-- `cpus` (number) - The number of cpus to use when building the VM.
- The default is `1` CPU.
-
-- `disk_cache` (string) - The cache mode to use for disk. Allowed values
- include any of `writethrough`, `writeback`, `none`, `unsafe`
- or `directsync`. By default, this is set to `writeback`.
-
-- `disk_compression` (boolean) - Apply compression to the QCOW2 disk file
- using `qemu-img convert`. Defaults to `false`.
-
-- `disk_discard` (string) - The discard mode to use for disk. Allowed values
- include any of `unmap` or `ignore`. By default, this is set to `ignore`.
-
-- `disk_detect_zeroes` (string) - The detect-zeroes mode to use for disk.
- Allowed values include any of `unmap`, `on` or `off`. Defaults to `off`.
- When the value is "off" we don't set the flag in the qemu command, so that
- Packer still works with old versions of QEMU that don't have this option.
-
-- `disk_image` (boolean) - Packer defaults to building from an ISO file, this
- parameter controls whether the ISO URL supplied is actually a bootable
- QEMU image. When this value is set to `true`, the machine will either clone
- the source or use it as a backing file (if `use_backing_file` is `true`);
- then, it will resize the image according to `disk_size` and boot it.
-
-- `disk_interface` (string) - The interface to use for the disk. Allowed
- values include any of `ide`, `scsi`, `virtio` or `virtio-scsi`^\*. Note
- also that any boot commands or kickstart type scripts must have proper
- adjustments for resulting device names. The Qemu builder uses `virtio` by
- default.
-
- ^\* Please be aware that use of the `scsi` disk interface has been disabled
- by Red Hat due to a bug described
- [here](https://bugzilla.redhat.com/show_bug.cgi?id=1019220).
- If you are running Qemu on RHEL or a RHEL variant such as CentOS, you
- *must* choose one of the other listed interfaces. Using the `scsi`
- interface under these circumstances will cause the build to fail.
-
-- `disk_size` (number) - The size, in megabytes, of the hard disk to create
- for the VM. By default, this is `40960` (40 GB).
-
- `floppy_dirs` (array of strings) - A list of directories to place onto
the floppy disk recursively. This is similar to the `floppy_files` option
except that the directory structure is preserved. This is useful for when
@@ -199,16 +129,6 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
listed files must not exceed 1.44 MB. The supported ways to move large
files into the OS are using `http_directory` or [the file provisioner](https://www.packer.io/docs/provisioners/file.html).
-- `format` (string) - Either `qcow2` or `raw`, this specifies the output
- format of the virtual machine image. This defaults to `qcow2`.
-
-- `headless` (boolean) - Packer defaults to building QEMU virtual machines by
- launching a GUI that shows the console of the machine being built. When this
- value is set to `true`, the machine will start without a console.
-
- You can still see the console if you make a note of the VNC display
- number chosen, and then connect using `vncviewer -Shared :`
-
- `http_directory` (string) - Path to a directory to serve using an
HTTP server. The files in this directory will be available over HTTP that
will be requestable from the virtual machine. This is useful for hosting
@@ -224,162 +144,6 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
to force the HTTP server to be on one port, make this minimum and maximum
port the same. By default the values are `8000` and `9000`, respectively.
-- `iso_skip_cache` (boolean) - Use iso from provided url. Qemu must support
- curl block device. This defaults to `false`.
-
-- `iso_target_extension` (string) - The extension of the iso file after
- download. This defaults to `iso`.
-
-- `iso_target_path` (string) - The path where the iso should be saved after
- download. By default will go in the packer cache, with a hash of the
- original filename as its name.
-
-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
- Packer will try these in order. If anything goes wrong attempting to
- download or while downloading a single URL, it will move on to the next. All
- URLs must point to the same file (same checksum). By default this is empty
- and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
-
-- `machine_type` (string) - The type of machine emulation to use. Run your
- qemu binary with the flags `-machine help` to list available types for
- your system. This defaults to `pc`.
-
-- `memory` (number) - The amount of memory to use when building the VM
- in megabytes. This defaults to `512` megabytes.
-
-- `net_device` (string) - The driver to use for the network interface. Allowed
- values `ne2k_pci`, `i82551`, `i82557b`, `i82559er`, `rtl8139`, `e1000`,
- `pcnet`, `virtio`, `virtio-net`, `virtio-net-pci`, `usb-net`, `i82559a`,
- `i82559b`, `i82559c`, `i82550`, `i82562`, `i82557a`, `i82557c`, `i82801`,
- `vmxnet3`, `i82558a` or `i82558b`. The Qemu builder uses `virtio-net` by
- default.
-
-- `output_directory` (string) - This is the path to the directory where the
- resulting virtual machine will be created. This may be relative or absolute.
- If relative, the path is relative to the working directory when `packer`
- is executed. This directory must not exist or be empty prior to running
- the builder. By default this is `output-BUILDNAME` where "BUILDNAME" is the
- name of the build.
-
-- `qemu_binary` (string) - The name of the Qemu binary to look for. This
- defaults to `qemu-system-x86_64`, but may need to be changed for
- some platforms. For example `qemu-kvm`, or `qemu-system-i386` may be a
- better choice for some systems.
-
-- `qemuargs` (array of array of strings) - Allows complete control over the
- qemu command line (though not, at this time, qemu-img). Each array of
- strings makes up a command line switch that overrides matching default
- switch/value pairs. Any value specified as an empty string is ignored. All
- values after the switch are concatenated with no separator.
-
- ~> **Warning:** The qemu command line allows extreme flexibility, so beware
- of conflicting arguments causing failures of your run. For instance, using
- --no-acpi could break the ability to send power signal type commands (e.g.,
- shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see
- the defaults, look in the packer.log file and search for the qemu-system-x86
- command. The arguments are all printed for review.
-
- The following shows a sample usage:
-
- ``` json
- {
- "qemuargs": [
- [ "-m", "1024M" ],
- [ "--no-acpi", "" ],
- [
- "-netdev",
- "user,id=mynet0,",
- "hostfwd=hostip:hostport-guestip:guestport",
- ""
- ],
- [ "-device", "virtio-net,netdev=mynet0" ]
- ]
- }
- ```
-
- would produce the following (not including other defaults supplied by the
- builder and not otherwise conflicting with the qemuargs):
-
- ``` text
- qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
- ```
-
- ~> **Windows Users:** [QEMU for Windows](https://qemu.weilnetz.de/) builds are available though an environmental variable does need
- to be set for QEMU for Windows to redirect stdout to the console instead of stdout.txt.
-
- The following shows the environment variable that needs to be set for Windows QEMU support:
-
- ``` text
- setx SDL_STDIO_REDIRECT=0
- ```
-
- You can also use the `SSHHostPort` template variable to produce a packer
- template that can be invoked by `make` in parallel:
-
- ``` json
- {
- "qemuargs": [
- [ "-netdev", "user,hostfwd=tcp::{{ .SSHHostPort }}-:22,id=forward"],
- [ "-device", "virtio-net,netdev=forward,id=net0"]
- ]
- }
- ```
-
- `make -j 3 my-awesome-packer-templates` spawns 3 packer processes, each of which
- will bind to their own SSH port as determined by each process. This will also
- work with WinRM, just change the port forward in `qemuargs` to map to WinRM's
- default port of `5985` or whatever value you have the service set to listen on.
-
-- `use_backing_file` (boolean) - Only applicable when `disk_image` is `true`
- and `format` is `qcow2`, set this option to `true` to create a new QCOW2
- file that uses the file located at `iso_url` as a backing file. The new file
- will only contain blocks that have changed compared to the backing file, so
- enabling this option can significantly reduce disk usage.
-
-- `use_default_display` (boolean) - If true, do not pass a `-display` option
- to qemu, allowing it to choose the default. This may be needed when running
- under macOS, and getting errors about `sdl` not being available.
-
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty
- string, which tells Packer to just forcefully shut down the machine unless a
- shutdown command takes place inside script so this may safely be omitted. It
- is important to add a `shutdown_command`. By default Packer halts the virtual
- machine and the file system may not be sync'd. Thus, changes made in a
- provisioner might not be saved. If one or more scripts require a reboot it is
- suggested to leave this blank since reboots may fail and specify the final
- shutdown command in your last script.
-
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- `5m` or five minutes.
-
-- `skip_compaction` (boolean) - Packer compacts the QCOW2 image using
- `qemu-img convert`. Set this option to `true` to disable compacting.
- Defaults to `false`.
-
-- `ssh_host_port_min` and `ssh_host_port_max` (number) - The minimum and
- maximum port to use for the SSH port on the host machine which is forwarded
- to the SSH port on the guest machine. Because Packer often runs in parallel,
- Packer will choose a randomly available port in this range to use as the
- host port. By default this is `2222` to `4444`.
-
-- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for
- the new virtual machine. By default this is `packer-BUILDNAME`, where
- "BUILDNAME" is the name of the build. Currently, no file extension will be
- used unless it is specified in this option.
-
-- `vnc_bind_address` (string / IP address) - The IP address that should be
- binded to for VNC. By default packer will use `127.0.0.1` for this. If you
- wish to bind to all interfaces use `0.0.0.0`.
-
-- `vnc_port_min` and `vnc_port_max` (number) - The minimum and maximum port
- to use for VNC access to the virtual machine. The builder uses VNC to type
- the initial `boot_command`. Because Packer generally runs in parallel,
- Packer uses a randomly chosen port in this range that appears available. By
- default this is `5900` to `6000`. The minimum and maximum ports are inclusive.
-
## Boot Command
The `boot_command` configuration is very important: it specifies the keys to
diff --git a/website/source/partials/builder/qemu/_Config-not-required.html.md b/website/source/partials/builder/qemu/_Config-not-required.html.md
index f70a9979e..792489a08 100644
--- a/website/source/partials/builder/qemu/_Config-not-required.html.md
+++ b/website/source/partials/builder/qemu/_Config-not-required.html.md
@@ -4,13 +4,27 @@
curl block device. This defaults to false.
- `accelerator` (string) - The accelerator type to use when running the VM.
- This may be none, kvm, tcg, hax, hvf, whpx, or xen. The appropriate
+ This may be `none`, `kvm`, `tcg`, `hax`, `hvf`, `whpx`, or `xen`. The appropriate
software must have already been installed on your build machine to use the
accelerator you specified. When no accelerator is specified, Packer will try
- to use kvm if it is available but will default to tcg otherwise.
+ to use `kvm` if it is available but will default to `tcg` otherwise.
+
+ -> The `hax` accelerator has issues attaching CDROM ISOs. This is an
+ upstream issue which can be tracked
+ [here](https://github.com/intel/haxm/issues/20).
+
+ -> The `hvf` and `whpx` accelerator are new and experimental as of
+ [QEMU 2.12.0](https://wiki.qemu.org/ChangeLog/2.12#Host_support).
+ You may encounter issues unrelated to Packer when using these. You may need to
+ add [ "-global", "virtio-pci.disable-modern=on" ] to `qemuargs` depending on the
+ guest operating system.
+
+ -> For `whpx`, note that [Stefan Weil's QEMU for Windows distribution](https://qemu.weilnetz.de/w64/)
+ does not include WHPX support and users may need to compile or source a
+ build of QEMU for Windows themselves with WHPX support.
- `cpus` (int) - The number of cpus to use when building the VM.
- The default is 1 CPU.
+ The default is `1` CPU.
- `disk_interface` (string) - The interface to use for the disk. Allowed
values include any of ide, scsi, virtio or virtio-scsi*. Note
@@ -21,9 +35,9 @@
- `disk_size` (uint) - The size, in megabytes, of the hard disk to create
for the VM. By default, this is 40960 (40 GB).
-- `disk_cache` (string) - The cache mode to use for disk. Allowed values
- include any of writethrough, writeback, none, unsafe
- or directsync. By default, this is set to writeback.
+- `disk_cache` (string) - The cache mode to use for disk. Allowed values include any of
+ `writethrough`, `writeback`, `none`, `unsafe` or `directsync`. By
+ default, this is set to `writeback`.
- `disk_discard` (string) - The discard mode to use for disk. Allowed values
include any of unmap or ignore. By default, this is set to ignore.
@@ -47,11 +61,11 @@
launching a GUI that shows the console of the machine being built. When this
value is set to true, the machine will start without a console.
-- `disk_image` (bool) - Packer defaults to building from an ISO file, this
- parameter controls whether the ISO URL supplied is actually a bootable
- QEMU image. When this value is set to true, the machine will either clone
- the source or use it as a backing file (if use_backing_file is true);
- then, it will resize the image according to disk_size and boot it.
+- `disk_image` (bool) - Packer defaults to building from an ISO file, this parameter controls
+ whether the ISO URL supplied is actually a bootable QEMU image. When
+ this value is set to `true`, the machine will either clone the source or
+ use it as a backing file (if `use_backing_file` is `true`); then, it
+ will resize the image according to `disk_size` and boot it.
- `use_backing_file` (bool) - Only applicable when disk_image is true
and format is qcow2, set this option to true to create a new QCOW2
@@ -91,16 +105,6 @@
some platforms. For example qemu-kvm, or qemu-system-i386 may be a
better choice for some systems.
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty
- string, which tells Packer to just forcefully shut down the machine unless a
- shutdown command takes place inside script so this may safely be omitted. It
- is important to add a shutdown_command. By default Packer halts the virtual
- machine and the file system may not be sync'd. Thus, changes made in a
- provisioner might not be saved. If one or more scripts require a reboot it is
- suggested to leave this blank since reboots may fail and specify the final
- shutdown command in your last script.
-
- `ssh_host_port_min` (int) - The minimum and
maximum port to use for the SSH port on the host machine which is forwarded
to the SSH port on the guest machine. Because Packer often runs in parallel,
@@ -132,9 +136,4 @@
TODO(@mitchellh): remove
- `run_once` (bool) - TODO(mitchellh): deprecate
-
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- shutdown_command for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- 5m or five minutes.
\ No newline at end of file
From 3b66cac61795d8cdb3359ac6a5e1e69b1525e63a Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 13 Jun 2019 11:18:04 +0200
Subject: [PATCH 46/97] document HTTPConfig from struct
---
common/http_config.go | 28 ++++++++++++++++---
website/source/docs/builders/qemu.html.md.erb | 23 ++++++---------
.../common/_HTTPConfig-not-required.html.md | 17 +++++++++++
.../partials/common/_HTTPConfig.html.md | 8 ++++++
4 files changed, 57 insertions(+), 19 deletions(-)
create mode 100644 website/source/partials/common/_HTTPConfig-not-required.html.md
create mode 100644 website/source/partials/common/_HTTPConfig.html.md
diff --git a/common/http_config.go b/common/http_config.go
index 8a6809ac4..8e4c4845a 100644
--- a/common/http_config.go
+++ b/common/http_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
@@ -6,11 +8,29 @@ import (
"github.com/hashicorp/packer/template/interpolate"
)
-// HTTPConfig contains configuration for the local HTTP Server
+// Packer will create an http server serving `http_directory` when it is set, a
+// random free port will be selected and the architecture of the directory
+// referenced will be available in your builder.
+//
+// Example usage from a builder:
+//
+// `wget http://{{ .HTTPIP }}:{{ .HTTPPort }}/foo/bar/preseed.cfg`
type HTTPConfig struct {
- HTTPDir string `mapstructure:"http_directory"`
- HTTPPortMin int `mapstructure:"http_port_min"`
- HTTPPortMax int `mapstructure:"http_port_max"`
+ // Path to a directory to serve using an HTTP server. The files in this
+ // directory will be available over HTTP that will be requestable from the
+ // virtual machine. This is useful for hosting kickstart files and so on.
+ // By default this is an empty string, which means no HTTP server will be
+ // started. The address and port of the HTTP server will be available as
+ // variables in `boot_command`. This is covered in more detail below.
+ HTTPDir string `mapstructure:"http_directory"`
+ // These are the minimum and maximum port to use for the HTTP server
+ // started to serve the `http_directory`. Because Packer often runs in
+ // parallel, Packer will choose a randomly available port in this range to
+ // run the HTTP server. If you want to force the HTTP server to be on one
+ // port, make this minimum and maximum port the same. By default the values
+ // are `8000` and `9000`, respectively.
+ HTTPPortMin int `mapstructure:"http_port_min"`
+ HTTPPortMax int `mapstructure:"http_port_max"`
}
func (c *HTTPConfig) Prepare(ctx *interpolate.Context) []error {
diff --git a/website/source/docs/builders/qemu.html.md.erb b/website/source/docs/builders/qemu.html.md.erb
index 0dc543f1a..422965dde 100644
--- a/website/source/docs/builders/qemu.html.md.erb
+++ b/website/source/docs/builders/qemu.html.md.erb
@@ -90,7 +90,14 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
<%= partial "partials/common/ISOConfig-not-required" %>
-## Specific Configuration Reference
+## Http directory configuration reference
+
+<%= partial "partials/common/HTTPConfig" %>
+### Optional:
+
+<%= partial "partials/common/HTTPConfig-not-required" %>
+
+## Qemu Configuration Reference
### Optional:
<%= partial "partials/builder/qemu/Config-not-required" %>
@@ -129,20 +136,6 @@ DADA
listed files must not exceed 1.44 MB. The supported ways to move large
files into the OS are using `http_directory` or [the file provisioner](https://www.packer.io/docs/provisioners/file.html).
-- `http_directory` (string) - Path to a directory to serve using an
- HTTP server. The files in this directory will be available over HTTP that
- will be requestable from the virtual machine. This is useful for hosting
- kickstart files and so on. By default this is an empty string, which means
- no HTTP server will be started. The address and port of the HTTP server will
- be available as variables in `boot_command`. This is covered in more detail
- below.
-
-- `http_port_min` and `http_port_max` (number) - These are the minimum and
- maximum port to use for the HTTP server started to serve the
- `http_directory`. Because Packer often runs in parallel, Packer will choose
- a randomly available port in this range to run the HTTP server. If you want
- to force the HTTP server to be on one port, make this minimum and maximum
- port the same. By default the values are `8000` and `9000`, respectively.
## Boot Command
diff --git a/website/source/partials/common/_HTTPConfig-not-required.html.md b/website/source/partials/common/_HTTPConfig-not-required.html.md
new file mode 100644
index 000000000..7ad792afe
--- /dev/null
+++ b/website/source/partials/common/_HTTPConfig-not-required.html.md
@@ -0,0 +1,17 @@
+
+
+- `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this
+ directory will be available over HTTP that will be requestable from the
+ virtual machine. This is useful for hosting kickstart files and so on.
+ By default this is an empty string, which means no HTTP server will be
+ started. The address and port of the HTTP server will be available as
+ variables in `boot_command`. This is covered in more detail below.
+
+- `http_port_min` (int) - These are the minimum and maximum port to use for the HTTP server
+ started to serve the `http_directory`. Because Packer often runs in
+ parallel, Packer will choose a randomly available port in this range to
+ run the HTTP server. If you want to force the HTTP server to be on one
+ port, make this minimum and maximum port the same. By default the values
+ are `8000` and `9000`, respectively.
+
+- `http_port_max` (int) - The maximum port to use for the HTTP server; see `http_port_min`.
\ No newline at end of file
diff --git a/website/source/partials/common/_HTTPConfig.html.md b/website/source/partials/common/_HTTPConfig.html.md
new file mode 100644
index 000000000..225422086
--- /dev/null
+++ b/website/source/partials/common/_HTTPConfig.html.md
@@ -0,0 +1,8 @@
+
+Packer will create an http server serving `http_directory` when it is set, a
+random free port will be selected and the architecture of the directory
+referenced will be available in your builder.
+
+Example usage from a builder:
+
+ `wget http://{{ .HTTPIP }}:{{ .HTTPPort }}/foo/bar/preseed.cfg`
From b48d22b43b7911ddf747a3904a3ef5a1deaa7172 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 13 Jun 2019 14:29:25 +0200
Subject: [PATCH 47/97] qemu: document FloppyConfig from struct
---
common/floppy_config.go | 23 ++++++++++++++-
website/source/docs/builders/qemu.html.md.erb | 29 +++++--------------
.../common/_FloppyConfig-not-required.html.md | 15 ++++++++++
.../partials/common/_FloppyConfig.html.md | 9 ++++++
4 files changed, 54 insertions(+), 22 deletions(-)
create mode 100644 website/source/partials/common/_FloppyConfig-not-required.html.md
create mode 100644 website/source/partials/common/_FloppyConfig.html.md
diff --git a/common/floppy_config.go b/common/floppy_config.go
index b1338cbcb..43e1ed414 100644
--- a/common/floppy_config.go
+++ b/common/floppy_config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package common
import (
@@ -9,8 +11,27 @@ import (
"github.com/hashicorp/packer/template/interpolate"
)
+// A floppy can be made available for your build. This is most useful for
+// unattended Windows installs, which look for an Autounattend.xml file on
+// removable media. By default, no floppy will be attached. All files listed in
+// this setting get placed into the root directory of the floppy and the floppy
+// is attached as the first floppy device. The summary size of the listed files
+// must not exceed 1.44 MB. The supported ways to move large files into the OS
+// are using `http_directory` or [the file
+// provisioner](https://www.packer.io/docs/provisioners/file.html).
type FloppyConfig struct {
- FloppyFiles []string `mapstructure:"floppy_files"`
+ // A list of files to place onto a floppy disk that is attached when the VM
+ // is booted. Currently, no support exists for creating sub-directories on
+ // the floppy. Wildcard characters (\*, ?, and \[\]) are allowed. Directory
+ // names are also allowed, which will add all the files found in the
+ // directory to the floppy.
+ FloppyFiles []string `mapstructure:"floppy_files"`
+ // A list of directories to place onto the floppy disk recursively. This is
+ // similar to the `floppy_files` option except that the directory structure
+ // is preserved. This is useful for when your floppy disk includes drivers
+	// or if you just want to organize its contents as a hierarchy. Wildcard
+ // characters (\*, ?, and \[\]) are allowed. The maximum summary size of
+ // all files in the listed directories are the same as in `floppy_files`.
FloppyDirectories []string `mapstructure:"floppy_dirs"`
}
diff --git a/website/source/docs/builders/qemu.html.md.erb b/website/source/docs/builders/qemu.html.md.erb
index 422965dde..f5c7d6f99 100644
--- a/website/source/docs/builders/qemu.html.md.erb
+++ b/website/source/docs/builders/qemu.html.md.erb
@@ -97,6 +97,14 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).
<%= partial "partials/common/HTTPConfig-not-required" %>
+## Floppy configuration reference
+
+<%= partial "partials/common/FloppyConfig" %>
+
+### Optional:
+
+<%= partial "partials/common/FloppyConfig-not-required" %>
+
## Qemu Configuration Reference
### Optional:
@@ -116,27 +124,6 @@ DADA
five seconds and one minute 30 seconds, respectively. If this isn't
specified, the default is `10s` or 10 seconds.
-- `floppy_dirs` (array of strings) - A list of directories to place onto
- the floppy disk recursively. This is similar to the `floppy_files` option
- except that the directory structure is preserved. This is useful for when
- your floppy disk includes drivers or if you just want to organize it's
- contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are allowed.
- The maximum summary size of all files in the listed directories are the
- same as in `floppy_files`.
-
-- `floppy_files` (array of strings) - A list of files to place onto a floppy
- disk that is attached when the VM is booted. This is most useful for
- unattended Windows installs, which look for an `Autounattend.xml` file on
- removable media. By default, no floppy will be attached. All files listed in
- this setting get placed into the root directory of the floppy and the floppy
- is attached as the first floppy device. Currently, no support exists for
- creating sub-directories on the floppy. Wildcard characters (\*, ?,
- and \[\]) are allowed. Directory names are also allowed, which will add all
- the files found in the directory to the floppy. The summary size of the
- listed files must not exceed 1.44 MB. The supported ways to move large
- files into the OS are using `http_directory` or [the file provisioner](https://www.packer.io/docs/provisioners/file.html).
-
-
## Boot Command
The `boot_command` configuration is very important: it specifies the keys to
diff --git a/website/source/partials/common/_FloppyConfig-not-required.html.md b/website/source/partials/common/_FloppyConfig-not-required.html.md
new file mode 100644
index 000000000..ec91a3356
--- /dev/null
+++ b/website/source/partials/common/_FloppyConfig-not-required.html.md
@@ -0,0 +1,15 @@
+
+
+- `floppy_files` ([]string) - A list of files to place onto a floppy disk that is attached when the VM
+ is booted. Currently, no support exists for creating sub-directories on
+ the floppy. Wildcard characters (\*, ?, and \[\]) are allowed. Directory
+ names are also allowed, which will add all the files found in the
+ directory to the floppy.
+
+- `floppy_dirs` ([]string) - A list of directories to place onto the floppy disk recursively. This is
+ similar to the `floppy_files` option except that the directory structure
+ is preserved. This is useful for when your floppy disk includes drivers
+    or if you just want to organize its contents as a hierarchy. Wildcard
+ characters (\*, ?, and \[\]) are allowed. The maximum summary size of
+ all files in the listed directories are the same as in `floppy_files`.
+
\ No newline at end of file
diff --git a/website/source/partials/common/_FloppyConfig.html.md b/website/source/partials/common/_FloppyConfig.html.md
new file mode 100644
index 000000000..d302ba7d2
--- /dev/null
+++ b/website/source/partials/common/_FloppyConfig.html.md
@@ -0,0 +1,9 @@
+
+A floppy can be made available for your build. This is most useful for
+unattended Windows installs, which look for an Autounattend.xml file on
+removable media. By default, no floppy will be attached. All files listed in
+this setting get placed into the root directory of the floppy and the floppy
+is attached as the first floppy device. The summary size of the listed files
+must not exceed 1.44 MB. The supported ways to move large files into the OS
+are using `http_directory` or [the file
+provisioner](https://www.packer.io/docs/provisioners/file.html).
From 3bb8c929022ac102fdc2524c1d21ad24e5876207 Mon Sep 17 00:00:00 2001
From: Adrien Delorme
Date: Thu, 13 Jun 2019 16:08:15 +0200
Subject: [PATCH 48/97] qemu: document boot command from struct
---
common/bootcommand/config.go | 127 +++++++++++++++++-
website/source/docs/builders/qemu.html.md.erb | 63 +--------
.../builder/qemu/_Config-not-required.html.md | 110 ++++++++++++---
.../_BootConfig-not-required.html.md | 20 +++
.../common/bootcommand/_BootConfig.html.md | 80 +++++++++++
.../_VNCConfig-not-required.html.md | 7 +
.../common/bootcommand/_VNCConfig.html.md | 18 +++
7 files changed, 340 insertions(+), 85 deletions(-)
create mode 100644 website/source/partials/common/bootcommand/_BootConfig-not-required.html.md
create mode 100644 website/source/partials/common/bootcommand/_BootConfig.html.md
create mode 100644 website/source/partials/common/bootcommand/_VNCConfig-not-required.html.md
create mode 100644 website/source/partials/common/bootcommand/_VNCConfig.html.md
diff --git a/common/bootcommand/config.go b/common/bootcommand/config.go
index d01e69624..e50c7288b 100644
--- a/common/bootcommand/config.go
+++ b/common/bootcommand/config.go
@@ -1,3 +1,5 @@
+//go:generate struct-markdown
+
package bootcommand
import (
@@ -8,18 +10,131 @@ import (
"github.com/hashicorp/packer/template/interpolate"
)
+// The boot configuration is very important: `boot_command` specifies the keys
+// to type when the virtual machine is first booted in order to start the OS
+// installer. This command is typed after `boot_wait`, which gives the virtual
+// machine some time to actually load.
+//
+// The `boot_command` is an array of strings. The strings are all typed in
+// sequence. It is an array only to improve readability within the template.
+//
+// There are a set of special keys available. If these are in your boot
+// command, they will be replaced by the proper key:
+//
+// - `<bs>` - Backspace
+//
+// - `<del>` - Delete
+//
+// - `<enter> <return>` - Simulates an actual "enter" or "return" keypress.
+//
+// - `<esc>` - Simulates pressing the escape key.
+//
+// - `<tab>` - Simulates pressing the tab key.
+//
+// - `<f1> - <f12>` - Simulates pressing a function key.
+//
+// - `<up> <down> <left> <right>` - Simulates pressing an arrow key.
+//
+// - `<spacebar>` - Simulates pressing the spacebar.
+//
+// - `<insert>` - Simulates pressing the insert key.
+//
+// - `<home> <end>` - Simulates pressing the home and end keys.
+//
+// - `<pageUp> <pageDown>` - Simulates pressing the page up and page down
+// keys.
+//
+// - `