From aa954c9406f9440d5654ecc1443ac337ef7ae3d3 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 24 Jun 2015 11:58:00 -0700 Subject: [PATCH 001/100] updated how vmx entries are handled --- builder/vmware/common/step_configure_vmx.go | 2 -- builder/vmware/common/vmx.go | 20 ++++++++++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_configure_vmx.go mode change 100644 => 100755 builder/vmware/common/vmx.go diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go old mode 100644 new mode 100755 index 401d53055..14c68e76a --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "log" "regexp" - "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -53,7 +52,6 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) - k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go old mode 100644 new mode 100755 index e7cdb662f..ab0291807 --- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,8 +25,7 @@ func ParseVMX(contents string) map[string]string { continue } - key := strings.ToLower(matches[1]) - results[key] = matches[2] + results[matches[1]] = matches[2] } return results @@ -43,9 +42,22 @@ func EncodeVMX(contents map[string]string) string { i++ } + // a list of VMX key fragments that should not be wrapped in quotes, + // 
fragments because multiple disks can use the virtualSSD suffix + noQuotes := []string { + "virtualSSD", + } + sort.Strings(keys) for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) + pat := "%s = \"%s\"\n" + for _, q := range noQuotes { + if strings.Contains(k, q) { + pat = "%s = %s\n" + break; + } + } + buf.WriteString(fmt.Sprintf(pat, k, contents[k])) } return buf.String() From 5e1ea753d4fa7fe4c463557d3543a6c6ac7292bc Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:30:39 -0700 Subject: [PATCH 002/100] Fixed case sensitive issue with VMX entries not being overwritten --- builder/vmware/common/step_clean_vmx.go | 4 ++-- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx.go mode change 100644 => 100755 builder/vmware/vmx/step_clone_vmx.go diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go old mode 100644 new mode 100755 index bf76f5863..44bf4c407 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -51,8 +51,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"devicetype"] = "cdrom-raw" - vmxData[ide+"filename"] = "auto detect" + vmxData[ide+"deviceType"] = "cdrom-raw" + vmxData[ide+"fileName"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go old mode 100644 new mode 100755 index a020e1627..1dbae678a --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName string - if _, ok := vmxData["scsi0:0.filename"]; ok { - diskName = vmxData["scsi0:0.filename"] + if _, ok := 
vmxData["scsi0:0.fileName"]; ok { + diskName = vmxData["scsi0:0.fileName"] } - if _, ok := vmxData["sata0:0.filename"]; ok { - diskName = vmxData["sata0:0.filename"] + if _, ok := vmxData["sata0:0.fileName"]; ok { + diskName = vmxData["sata0:0.fileName"] } - if _, ok := vmxData["ide0:0.filename"]; ok { - diskName = vmxData["ide0:0.filename"] + if _, ok := vmxData["ide0:0.fileName"]; ok { + diskName = vmxData["ide0:0.fileName"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not be found!") From ded78d2bc265881572ffc6b52bfc55e69e2420a9 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:45:10 -0700 Subject: [PATCH 003/100] fixed the test as well --- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx_test.go diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go old mode 100644 new mode 100755 index ea30fb54a..3ca6a7e23 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.filetype", ""}, - {"floppy0.filename", ""}, + {"floppy0.fileType", ""}, + {"floppy0.fileName", ""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.filename", "auto detect"}, - {"ide0:0.devicetype", "cdrom-raw"}, - {"ide0:1.filename", "bar"}, + {"ide0:0.fileName", "auto detect"}, + {"ide0:0.deviceType", "cdrom-raw"}, + {"ide0:1.fileName", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present = "TRUE" -floppy0.filetype = "file" +floppy0.fileType = "file" ` const testVMXISOPath = ` -ide0:0.devicetype = "cdrom-image" -ide0:0.filename = "foo" 
-ide0:1.filename = "bar" +ide0:0.deviceType = "cdrom-image" +ide0:0.fileName = "foo" +ide0:1.fileName = "bar" foo = "bar" ` From 1d7d490c01ae9a199ab848940f6f7857445cc5fa Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 24 Jun 2015 11:58:00 -0700 Subject: [PATCH 004/100] updated how vmx entries are handled --- builder/vmware/common/step_configure_vmx.go | 2 -- builder/vmware/common/vmx.go | 20 ++++++++++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_configure_vmx.go mode change 100644 => 100755 builder/vmware/common/vmx.go diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go old mode 100644 new mode 100755 index 401d53055..14c68e76a --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "log" "regexp" - "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -53,7 +52,6 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) - k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go old mode 100644 new mode 100755 index e7cdb662f..ab0291807 --- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,8 +25,7 @@ func ParseVMX(contents string) map[string]string { continue } - key := strings.ToLower(matches[1]) - results[key] = matches[2] + results[matches[1]] = matches[2] } return results @@ -43,9 +42,22 @@ func 
EncodeVMX(contents map[string]string) string { i++ } + // a list of VMX key fragments that should not be wrapped in quotes, + // fragments because multiple disks can use the virtualSSD suffix + noQuotes := []string { + "virtualSSD", + } + sort.Strings(keys) for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) + pat := "%s = \"%s\"\n" + for _, q := range noQuotes { + if strings.Contains(k, q) { + pat = "%s = %s\n" + break; + } + } + buf.WriteString(fmt.Sprintf(pat, k, contents[k])) } return buf.String() From df1be999dcc6a7861ce19ec7c06e99022c1db6e6 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:30:39 -0700 Subject: [PATCH 005/100] Fixed case sensitive issue with VMX entries not being overwritten --- builder/vmware/common/step_clean_vmx.go | 4 ++-- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx.go mode change 100644 => 100755 builder/vmware/vmx/step_clone_vmx.go diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go old mode 100644 new mode 100755 index bf76f5863..44bf4c407 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -51,8 +51,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"devicetype"] = "cdrom-raw" - vmxData[ide+"filename"] = "auto detect" + vmxData[ide+"deviceType"] = "cdrom-raw" + vmxData[ide+"fileName"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go old mode 100644 new mode 100755 index a020e1627..1dbae678a --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName 
string - if _, ok := vmxData["scsi0:0.filename"]; ok { - diskName = vmxData["scsi0:0.filename"] + if _, ok := vmxData["scsi0:0.fileName"]; ok { + diskName = vmxData["scsi0:0.fileName"] } - if _, ok := vmxData["sata0:0.filename"]; ok { - diskName = vmxData["sata0:0.filename"] + if _, ok := vmxData["sata0:0.fileName"]; ok { + diskName = vmxData["sata0:0.fileName"] } - if _, ok := vmxData["ide0:0.filename"]; ok { - diskName = vmxData["ide0:0.filename"] + if _, ok := vmxData["ide0:0.fileName"]; ok { + diskName = vmxData["ide0:0.fileName"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not be found!") From e9ef2b987eca789b65a306a902f2af54be090e17 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:45:10 -0700 Subject: [PATCH 006/100] fixed the test as well --- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx_test.go diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go old mode 100644 new mode 100755 index ea30fb54a..3ca6a7e23 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.filetype", ""}, - {"floppy0.filename", ""}, + {"floppy0.fileType", ""}, + {"floppy0.fileName", ""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.filename", "auto detect"}, - {"ide0:0.devicetype", "cdrom-raw"}, - {"ide0:1.filename", "bar"}, + {"ide0:0.fileName", "auto detect"}, + {"ide0:0.deviceType", "cdrom-raw"}, + {"ide0:1.fileName", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present = "TRUE" -floppy0.filetype = "file" 
+floppy0.fileType = "file" ` const testVMXISOPath = ` -ide0:0.devicetype = "cdrom-image" -ide0:0.filename = "foo" -ide0:1.filename = "bar" +ide0:0.deviceType = "cdrom-image" +ide0:0.fileName = "foo" +ide0:1.fileName = "bar" foo = "bar" ` From 26aa3dd575aadbd14e13e321d45d87d9dfecd482 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 7 Jul 2015 11:07:38 -0600 Subject: [PATCH 007/100] amazon/common: store instance ID earlier for cleanup --- .../amazon/common/step_run_source_instance.go | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index b94a6031c..b3bb6744f 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -31,7 +31,7 @@ type StepRunSourceInstance struct { UserData string UserDataFile string - instance *ec2.Instance + instanceId string spotRequest *ec2.SpotInstanceRequest } @@ -235,6 +235,9 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi instanceId = *spotResp.SpotInstanceRequests[0].InstanceID } + // Set the instance ID so that the cleanup works properly + s.instanceId = instanceId + ui.Message(fmt.Sprintf("Instance ID: %s", instanceId)) ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId)) stateChange := StateChangeConf{ @@ -251,7 +254,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi return multistep.ActionHalt } - s.instance = latestInstance.(*ec2.Instance) + instance := latestInstance.(*ec2.Instance) ec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1) ec2Tags[0] = &ec2.Tag{Key: aws.String("Name"), Value: aws.String("Packer Builder")} @@ -261,7 +264,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ Tags: ec2Tags, - Resources: []*string{s.instance.InstanceID}, + 
Resources: []*string{instance.InstanceID}, }) if err != nil { ui.Message( @@ -269,20 +272,20 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } if s.Debug { - if s.instance.PublicDNSName != nil && *s.instance.PublicDNSName != "" { - ui.Message(fmt.Sprintf("Public DNS: %s", *s.instance.PublicDNSName)) + if instance.PublicDNSName != nil && *instance.PublicDNSName != "" { + ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDNSName)) } - if s.instance.PublicIPAddress != nil && *s.instance.PublicIPAddress != "" { - ui.Message(fmt.Sprintf("Public IP: %s", *s.instance.PublicIPAddress)) + if instance.PublicIPAddress != nil && *instance.PublicIPAddress != "" { + ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIPAddress)) } - if s.instance.PrivateIPAddress != nil && *s.instance.PrivateIPAddress != "" { - ui.Message(fmt.Sprintf("Private IP: %s", *s.instance.PrivateIPAddress)) + if instance.PrivateIPAddress != nil && *instance.PrivateIPAddress != "" { + ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIPAddress)) } } - state.Put("instance", s.instance) + state.Put("instance", instance) return multistep.ActionContinue } @@ -313,16 +316,15 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { } // Terminate the source instance if it exists - if s.instance != nil { - + if s.instanceId != "" { ui.Say("Terminating the source AWS instance...") - if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{s.instance.InstanceID}}); err != nil { + if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{&s.instanceId}}); err != nil { ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err)) return } stateChange := StateChangeConf{ Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, - Refresh: InstanceStateRefreshFunc(ec2conn, *s.instance.InstanceID), + Refresh: 
InstanceStateRefreshFunc(ec2conn, s.instanceId), Target: "terminated", } From 1f6137e6368b3e6447121bff24c0066433ad730f Mon Sep 17 00:00:00 2001 From: Andrew Dahl Date: Wed, 8 Jul 2015 16:55:25 -0500 Subject: [PATCH 008/100] Add 1/10th second delay between key events to VNC for QEMU --- builder/qemu/step_type_boot_command.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index e42903f55..13c8622ed 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -177,7 +177,9 @@ func vncSendString(c *vnc.ClientConn, original string) { } c.KeyEvent(keyCode, true) + time.Sleep(time.Second/10) c.KeyEvent(keyCode, false) + time.Sleep(time.Second/10) if keyShift { c.KeyEvent(KeyLeftShift, false) From 8495a8c919fb1118c3ea5f4e29b18b774e119c40 Mon Sep 17 00:00:00 2001 From: Gleb M Borisov Date: Wed, 15 Jul 2015 02:11:46 +0300 Subject: [PATCH 009/100] Fix handling IPv6 when ssh_interface set (openstack builder) --- builder/openstack/ssh.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 3e7350d11..3e1c8c20f 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -101,11 +101,15 @@ func sshAddrFromPool(s *servers.Server, desired string) string { if address["OS-EXT-IPS:type"] == "floating" { addr = address["addr"].(string) } else { - if address["version"].(float64) == 4 { + if address["version"].(float64) == 6 { + addr = fmt.Sprintf("[%s]", address["addr"].(string)) + } else { addr = address["addr"].(string) } } + if addr != "" { + log.Printf("[DEBUG] Detected address: %s", addr) return addr } } From 988cf2fecff95e20933fcd43b3aefb6c6ae14b95 Mon Sep 17 00:00:00 2001 From: Travis Truman Date: Wed, 15 Jul 2015 21:31:13 -0400 Subject: [PATCH 010/100] Fixes #2434 by adding OpenStack Glance metadata support --- builder/openstack/image_config.go | 3 ++- 
builder/openstack/step_create_image.go | 3 ++- website/source/docs/builders/openstack.html.markdown | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go index 124449eab..b52ad2c67 100644 --- a/builder/openstack/image_config.go +++ b/builder/openstack/image_config.go @@ -8,7 +8,8 @@ import ( // ImageConfig is for common configuration related to creating Images. type ImageConfig struct { - ImageName string `mapstructure:"image_name"` + ImageName string `mapstructure:"image_name"` + ImageMetadata map[string]string `mapstructure:"metadata"` } func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error { diff --git a/builder/openstack/step_create_image.go b/builder/openstack/step_create_image.go index b777e8b0b..9b6ac0cd8 100644 --- a/builder/openstack/step_create_image.go +++ b/builder/openstack/step_create_image.go @@ -30,7 +30,8 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { // Create the image ui.Say(fmt.Sprintf("Creating the image: %s", config.ImageName)) imageId, err := servers.CreateImage(client, server.ID, servers.CreateImageOpts{ - Name: config.ImageName, + Name: config.ImageName, + Metadata: config.ImageMetadata, }).ExtractImageID() if err != nil { err := fmt.Errorf("Error creating image: %s", err) diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index fec1a85a6..b61e503be 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -96,6 +96,8 @@ can be configured for this builder. * `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for Rackconnect to assign the machine an IP address before connecting via SSH. Defaults to false. +* `metadata` (object of key/value strings) - Glance metadata that will be applied + to the image. 
## Basic Example: Rackspace public cloud From 9007b1cc6762b12a98f39ccdd2ec5460391aeb35 Mon Sep 17 00:00:00 2001 From: Matthew Patton Date: Tue, 21 Jul 2015 17:24:55 -0400 Subject: [PATCH 011/100] Document behavior of AWS {access,secret}_key in relation to credentials file and profile lookup via AWS_PROFILE --- .../docs/builders/amazon-chroot.html.markdown | 14 ++++---------- .../source/docs/builders/amazon-ebs.html.markdown | 8 ++++---- .../docs/builders/amazon-instance.html.markdown | 9 +++++---- website/source/docs/builders/amazon.html.markdown | 4 ++-- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index d6b61ca8b..b3d1644dd 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -58,11 +58,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. + If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. 
* `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -70,11 +68,7 @@ can be configured for this builder. [configuration templates](/docs/templates/configuration-templates.html) for more info) * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`. * `source_ami` (string) - The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 69a9a5c04..fc78901a6 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -38,8 +38,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. 
+ If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -53,8 +54,7 @@ can be configured for this builder. to launch the EC2 instance to create the AMI. * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index fa3c8a190..81e425c9a 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -43,8 +43,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. 
+ If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. * `account_id` (string) - Your AWS account ID. This is required for bundling the AMI. This is _not the same_ as the access key. You can find your @@ -65,8 +66,8 @@ can be configured for this builder. This bucket will be created if it doesn't exist. * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` + * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index ad336ad1c..f82457f1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -33,8 +33,8 @@ much easier to use and Amazon generally recommends EBS-backed images nowadays. ## Using an IAM Instance Profile -If AWS keys are not specified in the template, a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file or through environment variables -Packer will use credentials provided by the instance's IAM profile, if it has one. 
+If AWS keys are not specified in the template, Packer will consult the [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, try the standard AWS environment variables, and then +any IAM role credentials defined by the instance's metadata. The following policy document provides the minimal set permissions necessary for Packer to work: From 985c3c576b5da25ccc4f653fc4936c95ee21fb90 Mon Sep 17 00:00:00 2001 From: Xiol Date: Wed, 22 Jul 2015 10:13:04 +0100 Subject: [PATCH 012/100] Update setup documentation to cover issue #1117 In issue #1117, the packer binary can sometimes conflict with the packer binary supplied with cracklib. This documentation update covers this and provides workarounds for affected users. --- .../intro/getting-started/setup.html.markdown | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index ae14c2748..5e4734e08 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -58,6 +58,34 @@ If you get an error that `packer` could not be found, then your PATH environment variable was not setup properly. Please go back and ensure that your PATH variable contains the directory which has Packer installed. +The `packer` binary may conflict with the cracklib-supplied packer binary +on RPM-based systems like Fedora, RHEL or CentOS. If this happens, running +`packer` will result in no output or something like this: + +```text +$ packer +/usr/share/cracklib/pw_dict.pwd: Permission denied +/usr/share/cracklib/pw_dict: Permission denied +``` + +In this case you may wish to symlink the `packer` binary to `packer.io` +and use that instead. e.g. 
+ +```text +ln -s /usr/local/bin/packer /usr/local/bin/packer.io +``` + +Then replace `packer` with `packer.io` when following the rest of the +documentation. + +Alternatively you could change your `$PATH` so that the right packer +binary is selected first, however this may cause issues when attempting +to change passwords in the future. + +```text +export PATH="/path/to/packer/directory:$PATH" +``` + Otherwise, Packer is installed and you're ready to go! ## Alternative Installation Methods From 823e9e73fe45f1e3055465fe00dd45b8cc47fc32 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 17:18:08 -0700 Subject: [PATCH 013/100] Docs cleanup - Reformat .html / .erb files - Remove extra in index.html.erb - Add htmlbeautifier gem - Add middleman-breadcrumbs - Add make format (calls htmlbeautifier) --- website/Gemfile | 2 + website/Gemfile.lock | 5 + website/Makefile | 4 + website/README.md | 10 + website/config.rb | 2 + website/source/downloads.html.erb | 84 ++++---- website/source/index.html.erb | 125 +++++------ website/source/layouts/community.erb | 8 +- website/source/layouts/docs.erb | 196 +++++++++--------- .../source/layouts/docs_machine_readable.erb | 27 +-- website/source/layouts/inner.erb | 47 ++--- website/source/layouts/intro.erb | 50 ++--- website/source/layouts/layout.erb | 136 ++++++------ 13 files changed, 358 insertions(+), 338 deletions(-) diff --git a/website/Gemfile b/website/Gemfile index 2b35e2810..14f80e508 100644 --- a/website/Gemfile +++ b/website/Gemfile @@ -3,3 +3,5 @@ source "https://rubygems.org" ruby "2.2.2" gem "middleman-hashicorp", github: "hashicorp/middleman-hashicorp" +gem "middleman-breadcrumbs" +gem "htmlbeautifier" \ No newline at end of file diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 216114847..3895f5bb1 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -69,6 +69,7 @@ GEM hitimes (1.2.2) hooks (0.4.0) uber (~> 0.0.4) + htmlbeautifier (1.1.0) htmlcompressor (0.2.0) http_parser.rb 
(0.6.0) i18n (0.7.0) @@ -92,6 +93,8 @@ GEM middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) + middleman-breadcrumbs (0.1.0) + middleman (>= 3.3.5) middleman-core (3.3.12) activesupport (~> 4.1.0) bundler (~> 1.1) @@ -179,4 +182,6 @@ PLATFORMS ruby DEPENDENCIES + htmlbeautifier + middleman-breadcrumbs middleman-hashicorp! diff --git a/website/Makefile b/website/Makefile index 9888cfa82..100a4dbf9 100644 --- a/website/Makefile +++ b/website/Makefile @@ -8,3 +8,7 @@ dev: init build: init PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman build + +format: + bundle exec htmlbeautifier -t 2 source/*.erb + bundle exec htmlbeautifier -t 2 source/layouts/*.erb \ No newline at end of file diff --git a/website/README.md b/website/README.md index 881362f5a..e86ccc60e 100644 --- a/website/README.md +++ b/website/README.md @@ -21,3 +21,13 @@ make dev Then open up `localhost:4567`. Note that some URLs you may need to append ".html" to make them work (in the navigation and such). + +## Keeping Tidy + +To keep the source code nicely formatted, there is a `make format` target. This +runs `htmlbeautify` and `pandoc` to reformat the source code so it's nicely formatted. + + make format + +Note that you will need to install pandoc yourself. `make format` will skip it +if you don't have it installed. \ No newline at end of file diff --git a/website/config.rb b/website/config.rb index 9c21ff297..80fc3680b 100644 --- a/website/config.rb +++ b/website/config.rb @@ -4,6 +4,8 @@ set :base_url, "https://www.packer.io/" +activate :breadcrumbs + activate :hashicorp do |h| h.version = ENV["PACKER_VERSION"] h.bintray_enabled = ENV["BINTRAY_ENABLED"] diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb index d10dfb5c6..e8c66f970 100644 --- a/website/source/downloads.html.erb +++ b/website/source/downloads.html.erb @@ -3,47 +3,49 @@ page_title: "Downloads" ---
-
-

Downloads

- Latest version: <%= latest_version %> -
+
+

Downloads

+ Latest version: <%= latest_version %> +
-
-
-
-
-

- Below are all available downloads for the latest version of Packer - (<%= latest_version %>). Please download the proper package for your - operating system and architecture. You can find SHA256 checksums - for packages here. -

-
-
- <% product_versions.each do |os, versions| %> -
-
-
<%= system_icon(os) %>
-
-

<%= os %>

- -
-
-
-
- <% end %> - -
-
- - - -
-
-
+
+
+
+

+ Below are all available downloads for the latest version of Packer ( + <%= latest_version %>). Please download the proper package for your operating system and architecture. You can find SHA256 checksums for packages here. +

+
+
+ <% product_versions.each do |os, versions| %> +
+
+
+ <%= system_icon(os) %> +
+
+

<%= os %>

+ +
+
+
+
+ <% end %> +
+
+ + + +
+
+
diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 6d38bb645..1658f67a3 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -1,75 +1,58 @@ --- -description: |- - Packer is a free and open source tool for creating golden images for multiple platforms from a single source configuration. +description: Packer is a free and open source tool for creating golden images + for multiple platforms from a single source configuration. ---
- -
-
-
-
-

- Packer is a tool for creating machine and container images for multiple platforms from a single source configuration. -

-
-
-
-
- -
- -
- - -
-
-
-
- <%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %> -
- -
-

Modern, Automated

-

- Packer is easy to use and automates the creation of any type - of machine image. It embraces modern configuration management by - encouraging you to use automated scripts to install and - configure the software within your Packer-made images. - - Packer brings machine images into the modern age, unlocking - untapped potential and opening new opportunities. -

-
-
-
-
- -
-
-
-
-

Works Great With

-

- Out of the box Packer comes with support to build images for - Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, - VirtualBox, VMware, and more. Support for - more platforms is on the way, and anyone can add new platforms - via plugins. -

-
- -
- <%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %> -
-
-
-
- - +
+
+
+
+

+ Packer is a tool for creating machine and container images for multiple platforms from a single source configuration. +

+
+
+
+
+
+ +
+
+
+
+
+ <%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %> +
+
+

Modern, Automated

+

+ Packer is easy to use and automates the creation of any type of machine image. It embraces modern configuration management by encouraging you to use automated scripts to install and configure the software within your Packer-made images. Packer brings machine images into the modern age, unlocking untapped potential and opening new opportunities. +

+
+
+
+
+
+
+
+
+

Works Great With

+

+ Out of the box Packer comes with support to build images for Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, VirtualBox, VMware, and more. Support for more platforms is on the way, and anyone can add new platforms via plugins. +

+
+
+ <%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %> +
+
+
+
+ + diff --git a/website/source/layouts/community.erb b/website/source/layouts/community.erb index 12c1cc7bc..53dacbb4e 100644 --- a/website/source/layouts/community.erb +++ b/website/source/layouts/community.erb @@ -1,6 +1,6 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

- <% end %> - <%= yield %> + <% content_for :sidebar do %> +

+ <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index d0c331b1f..2b8bb8810 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -1,97 +1,103 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Docs

- - - - - - - - - - - - - - - - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

Docs

+ + + + + + + + + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/docs_machine_readable.erb b/website/source/layouts/docs_machine_readable.erb index 4a3cac34d..a19c42258 100644 --- a/website/source/layouts/docs_machine_readable.erb +++ b/website/source/layouts/docs_machine_readable.erb @@ -1,15 +1,16 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Docs

- - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

Docs

+ + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/inner.erb b/website/source/layouts/inner.erb index c570f73f6..0706d1f9d 100644 --- a/website/source/layouts/inner.erb +++ b/website/source/layouts/inner.erb @@ -1,30 +1,29 @@ <% wrap_layout :layout do %> -
-
- -
-
- <%= yield %> +
+
+ - - <% if current_page.data.next_url %> -
-
<% end %> diff --git a/website/source/layouts/intro.erb b/website/source/layouts/intro.erb index 127d6ab84..cea9a3403 100644 --- a/website/source/layouts/intro.erb +++ b/website/source/layouts/intro.erb @@ -1,26 +1,28 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Intro

- - - - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

Intro

+ + + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 26a1dac6b..420883de6 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -1,79 +1,83 @@ - - <%= [current_page.data.page_title, "Packer by HashiCorp"].compact.join(" - ") %> + + + <%= [current_page.data.page_title, "Packer by HashiCorp"].compact.join(" - ") %> + - - <%= stylesheet_link_tag "application" %> - + <%= stylesheet_link_tag "application" %> - - - " type="image/x-icon"> - " type="image/x-icon"> - - - - - - - - <%= yield %> - -
- - - + " type="image/x-icon"> + " type="image/x-icon"> + + + + + + <%= yield %> +
+ - + From 448fce56c0fd9ba81b32bb714a0b1af6c7754b56 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 18:09:32 -0700 Subject: [PATCH 014/100] Replace tabs with 2 spaces --- .../source/assets/stylesheets/_footer.scss | 60 +++++++++---------- .../source/assets/stylesheets/_helpers.scss | 26 ++++---- website/source/assets/stylesheets/_reset.scss | 12 ++-- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/website/source/assets/stylesheets/_footer.scss b/website/source/assets/stylesheets/_footer.scss index 7f771340f..67594e6fe 100644 --- a/website/source/assets/stylesheets/_footer.scss +++ b/website/source/assets/stylesheets/_footer.scss @@ -12,45 +12,45 @@ footer { margin-left: -20px; } - ul { - margin-top: 40px; - @include respond-to(mobile) { - margin-left: $baseline; - margin-top: $baseline; + ul { + margin-top: 40px; + @include respond-to(mobile) { + margin-left: $baseline; + margin-top: $baseline; } - li { - display: inline; - margin-right: 50px; - @include respond-to(mobile) { - margin-right: 20px; - display: list-item; + li { + display: inline; + margin-right: 50px; + @include respond-to(mobile) { + margin-right: 20px; + display: list-item; } } - .hashi-logo { - background: image-url('logo_footer.png') no-repeat center top; - height: 40px; - width: 40px; - background-size: 37px 40px; - text-indent: -999999px; - display: inline-block; - margin-top: -10px; - margin-right: 0; - @include respond-to(mobile) { - margin-top: -50px; - margin-right: $baseline; - } - } - } + .hashi-logo { + background: image-url('logo_footer.png') no-repeat center top; + height: 40px; + width: 40px; + background-size: 37px 40px; + text-indent: -999999px; + display: inline-block; + margin-top: -10px; + margin-right: 0; + @include respond-to(mobile) { + margin-top: -50px; + margin-right: $baseline; + } + } + } - .active { + .active { color: $green; - } + } - button { + button { margin-top: 20px; - } + } } .page-wrap { diff --git 
a/website/source/assets/stylesheets/_helpers.scss b/website/source/assets/stylesheets/_helpers.scss index d28b5265c..8c20db3fc 100644 --- a/website/source/assets/stylesheets/_helpers.scss +++ b/website/source/assets/stylesheets/_helpers.scss @@ -70,17 +70,17 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space; background-color: #000; color: $white; - a { + a { color: inherit; &:hover { - color: $green; - } + color: $green; + } - &:active { - color: darken($green, 30%); - } - } + &:active { + color: darken($green, 30%); + } + } } .white-background { @@ -102,9 +102,9 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space; color: $orange; font-size: 20px; - a:hover, a:active, a:visited { + a:hover, a:active, a:visited { color: inherit; - } + } } // media queries @@ -170,13 +170,13 @@ $break-lg: 980px; @mixin transform-scale($value) { -webkit-transform: scale($value); - -moz-transform: scale($value); - transform: scale($value); + -moz-transform: scale($value); + transform: scale($value); } @mixin transition($type, $speed, $easing) { - -webkit-transition: $type $speed $easing; - -moz-transition: $type $speed $easing; + -webkit-transition: $type $speed $easing; + -moz-transition: $type $speed $easing; -o-transition: $type $speed $easing; transition: $type $speed $easing; } diff --git a/website/source/assets/stylesheets/_reset.scss b/website/source/assets/stylesheets/_reset.scss index 4ebb5fd27..5a417ec09 100644 --- a/website/source/assets/stylesheets/_reset.scss +++ b/website/source/assets/stylesheets/_reset.scss @@ -14,10 +14,10 @@ form, input, textarea, button { line-height: 1.0; color: inherit; - &:focus { - line-height: 1.0; - box-shadow: none !important; - outline: none; - vertical-align: middle; - } + &:focus { + line-height: 1.0; + box-shadow: none !important; + outline: none; + vertical-align: middle; + } } From b9c9da7157a0bbc2709fc1fdbbe91c4189583d7a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:26:48 -0700 Subject: 
[PATCH 015/100] Added a static version of this so we can partial it into place in the layout. This prevents it from being reformatted. --- website/source/layouts/google-analytics.html | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 website/source/layouts/google-analytics.html diff --git a/website/source/layouts/google-analytics.html b/website/source/layouts/google-analytics.html new file mode 100644 index 000000000..6cd45279d --- /dev/null +++ b/website/source/layouts/google-analytics.html @@ -0,0 +1,9 @@ + From dd255df412fe8447281dbcd0751f98658a1a2f41 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:28:03 -0700 Subject: [PATCH 016/100] Add pandoc magical markdown reformatter --- website/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/website/Makefile b/website/Makefile index 100a4dbf9..1cc81038c 100644 --- a/website/Makefile +++ b/website/Makefile @@ -11,4 +11,7 @@ build: init format: bundle exec htmlbeautifier -t 2 source/*.erb - bundle exec htmlbeautifier -t 2 source/layouts/*.erb \ No newline at end of file + bundle exec htmlbeautifier -t 2 source/layouts/*.erb + pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true + pandoc -v > /dev/null && find . 
-iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true + From 36052d8c2e19ae162e380e556a45cbf9fe3eb0ca Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:28:35 -0700 Subject: [PATCH 017/100] Add new layout with magic footer link and static google analytics partial --- website/source/layouts/layout.erb | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 420883de6..a5cc83c5b 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -52,9 +52,16 @@
  • A HashiCorp project.
  • -
  • - Edit this page -
  • + <% # current_page.path does not have an extension, but + # current_page.source_file does. Also, we don't want to show + # this on the homepage. + if current_page.url != "/" + current_page_source = current_page.path + \ + current_page.source_file.split(current_page.path)[1] %> +
  • + Edit this page +
  • + <% end %> @@ -63,21 +70,6 @@
    - + <%= partial "layouts/google-analytics.html" %> From 13ac8896a9561be234e310554248ea5f412bbda4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:29:59 -0700 Subject: [PATCH 018/100] Reformat the layout file (again) --- website/source/layouts/layout.erb | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index a5cc83c5b..f66adb067 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -58,18 +58,18 @@ if current_page.url != "/" current_page_source = current_page.path + \ current_page.source_file.split(current_page.path)[1] %> -
  • - Edit this page -
  • - <% end %> - - - - -
    +
  • + Edit this page +
  • + <% end %> + + + +
    - - <%= partial "layouts/google-analytics.html" %> - + + + <%= partial "layouts/google-analytics.html" %> + From d57c051651d86a2a1900c92ab600c8008f97ca8a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:31:00 -0700 Subject: [PATCH 019/100] Reformat everything --- website/source/community/index.html.markdown | 137 ++--- .../docs/basics/terminology.html.markdown | 87 ++-- .../docs/builders/amazon-chroot.html.markdown | 264 +++++----- .../docs/builders/amazon-ebs.html.markdown | 251 ++++----- .../builders/amazon-instance.html.markdown | 379 +++++++------- .../source/docs/builders/amazon.html.markdown | 51 +- .../source/docs/builders/custom.html.markdown | 18 +- .../docs/builders/digitalocean.html.markdown | 81 +-- .../source/docs/builders/docker.html.markdown | 234 +++++---- .../source/docs/builders/null.html.markdown | 27 +- .../docs/builders/openstack.html.markdown | 139 ++--- .../docs/builders/parallels-iso.html.markdown | 315 ++++++------ .../docs/builders/parallels-pvm.html.markdown | 222 ++++---- .../docs/builders/parallels.html.markdown | 49 +- .../source/docs/builders/qemu.html.markdown | 338 ++++++------- .../builders/virtualbox-iso.html.markdown | 417 +++++++-------- .../builders/virtualbox-ovf.html.markdown | 325 ++++++------ .../docs/builders/virtualbox.html.markdown | 41 +- .../docs/builders/vmware-iso.html.markdown | 477 +++++++++--------- .../docs/builders/vmware-vmx.html.markdown | 180 +++---- .../source/docs/builders/vmware.html.markdown | 41 +- .../docs/command-line/build.html.markdown | 45 +- .../docs/command-line/fix.html.markdown | 47 +- .../docs/command-line/inspect.html.markdown | 42 +- .../command-line/introduction.html.markdown | 33 +- .../machine-readable.html.markdown | 91 ++-- .../docs/command-line/push.html.markdown | 29 +- .../docs/command-line/validate.html.markdown | 26 +- .../source/docs/extend/builder.html.markdown | 197 ++++---- .../source/docs/extend/command.html.markdown | 92 ++-- 
.../extend/developing-plugins.html.markdown | 166 +++--- .../source/docs/extend/plugins.html.markdown | 91 ++-- .../docs/extend/post-processor.html.markdown | 101 ++-- .../docs/extend/provisioner.html.markdown | 101 ++-- website/source/docs/index.html.markdown | 17 +- .../source/docs/installation.html.markdown | 68 +-- .../command-build.html.markdown | 274 +++++----- .../command-inspect.html.markdown | 100 ++-- .../command-version.html.markdown | 72 +-- .../machine-readable/general.html.markdown | 44 +- .../docs/machine-readable/index.html.markdown | 41 +- .../other/core-configuration.html.markdown | 49 +- .../source/docs/other/debugging.html.markdown | 46 +- .../environmental-variables.html.markdown | 48 +- .../docs/post-processors/atlas.html.markdown | 69 ++- .../post-processors/compress.html.markdown | 45 +- .../docker-import.html.markdown | 44 +- .../post-processors/docker-push.html.markdown | 36 +- .../post-processors/docker-save.html.markdown | 32 +- .../post-processors/docker-tag.html.markdown | 53 +- .../vagrant-cloud.html.markdown | 103 ++-- .../post-processors/vagrant.html.markdown | 133 +++-- .../post-processors/vsphere.html.markdown | 51 +- .../provisioners/ansible-local.html.markdown | 89 ++-- .../provisioners/chef-client.html.markdown | 180 +++---- .../docs/provisioners/chef-solo.html.markdown | 172 ++++--- .../docs/provisioners/custom.html.markdown | 19 +- .../docs/provisioners/file.html.markdown | 68 +-- .../provisioners/powershell.html.markdown | 87 ++-- .../puppet-masterless.html.markdown | 146 +++--- .../provisioners/puppet-server.html.markdown | 83 +-- .../salt-masterless.html.markdown | 51 +- .../docs/provisioners/shell.html.markdown | 234 ++++----- .../docs/templates/builders.html.markdown | 76 +-- .../configuration-templates.html.markdown | 194 ++++--- .../docs/templates/introduction.html.markdown | 100 ++-- .../templates/post-processors.html.markdown | 129 ++--- .../docs/templates/provisioners.html.markdown | 114 ++--- 
.../source/docs/templates/push.html.markdown | 58 +-- .../templates/user-variables.html.markdown | 150 +++--- .../templates/veewee-to-packer.html.markdown | 65 +-- .../getting-started/build-image.html.markdown | 167 +++--- .../intro/getting-started/next.html.markdown | 40 +- .../parallel-builds.html.markdown | 188 +++---- .../getting-started/provision.html.markdown | 118 ++--- .../remote-builds.html.markdown | 98 ++-- .../intro/getting-started/setup.html.markdown | 66 +-- .../getting-started/vagrant.html.markdown | 78 +-- .../intro/hashicorp-ecosystem.html.markdown | 65 ++- website/source/intro/index.html.markdown | 43 +- website/source/intro/platforms.html.markdown | 100 ++-- website/source/intro/use-cases.html.markdown | 67 +-- website/source/intro/why.html.markdown | 64 +-- 83 files changed, 4946 insertions(+), 4622 deletions(-) diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index 1b21e818a..f4069fbdf 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -1,22 +1,25 @@ --- -layout: "community" -page_title: "Community" -description: |- - Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. ---- +description: | + Packer is a new project with a growing community. Despite this, there are + dedicated users willing to help through various mediums. +layout: community +page_title: Community +... # Community Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. -**IRC:** `#packer-tool` on Freenode. +**IRC:** `#packer-tool` on Freenode. -**Mailing List:** [Packer Google Group](http://groups.google.com/group/packer-tool) +**Mailing List:** [Packer Google +Group](http://groups.google.com/group/packer-tool) -**Bug Tracker:** [Issue tracker on GitHub](https://github.com/mitchellh/packer/issues). 
-Please only use this for reporting bugs. Do not ask for general help here. Use IRC -or the mailing list for that. +**Bug Tracker:** [Issue tracker on +GitHub](https://github.com/mitchellh/packer/issues). Please only use this for +reporting bugs. Do not ask for general help here. Use IRC or the mailing list +for that. ## People @@ -25,62 +28,70 @@ to Packer in some core way. Over time, faces may appear and disappear from this list as contributors come and go.
    -
    - -
    -

    Mitchell Hashimoto (@mitchellh)

    -

    - Mitchell Hashimoto is the creator of Packer. He developed the - core of Packer as well as the Amazon, VirtualBox, and VMware - builders. In addition to Packer, Mitchell is the creator of - Vagrant. He is self - described as "automation obsessed." -

    -
    -
    -
    - -
    -

    Jack Pearkes (@pearkes)

    -

    - Jack Pearkes created and maintains the DigitalOcean builder - for Packer. Outside of Packer, Jack is an avid open source - contributor and software consultant.

    -
    -
    +
    + +
    +

    Mitchell Hashimoto (@mitchellh)

    +

    + Mitchell Hashimoto is the creator of Packer. He developed the + core of Packer as well as the Amazon, VirtualBox, and VMware + builders. In addition to Packer, Mitchell is the creator of + Vagrant. He is self + described as "automation obsessed." +

    +
    +
    -
    - -
    -

    Mark Peek (@markpeek)

    -

    - In addition to Packer, Mark Peek helps maintain - various open source projects such as - cloudtools and - IronPort Python libraries. - Mark is also a FreeBSD committer.

    -
    -
    +
    + +
    +

    Jack Pearkes (@pearkes)

    +

    + Jack Pearkes created and maintains the DigitalOcean builder + for Packer. Outside of Packer, Jack is an avid open source + contributor and software consultant.

    +
    +
    -
    - -
    -

    Ross Smith II (@rasa)

    -

    -Ross Smith maintains our VMware builder on Windows, and provides other valuable assistance. -Ross is an open source enthusiast, published author, and freelance consultant.

    -
    -
    +
    + +
    +

    Mark Peek (@markpeek)

    +

    + In addition to Packer, Mark Peek helps maintain + various open source projects such as + cloudtools and + IronPort Python libraries. + Mark is also a FreeBSD committer.

    +
    +
    -
    - -
    -

    Rickard von Essen
    (@rickard-von-essen)

    -

    -Rickard von Essen maintains our Parallels Desktop builder. Rickard is an polyglot programmer and consults on Continuous Delivery.

    -
    -
    +
    + +
    +

    Ross Smith II (@rasa)

    +

    + +Ross Smith maintains our +VMware builder on Windows, and provides other valuable assistance. Ross is an +open source enthusiast, published author, and freelance consultant. +

    +
    +
    + +
    + +
    +

    Rickard von Essen
    (@rickard-von-essen)

    +

    + +Rickard von Essen maintains our Parallels Desktop builder. Rickard is an +polyglot programmer and consults on Continuous Delivery. +

    +
    +
    + +
    -
    diff --git a/website/source/docs/basics/terminology.html.markdown b/website/source/docs/basics/terminology.html.markdown index 4fce2cc79..800478143 100644 --- a/website/source/docs/basics/terminology.html.markdown +++ b/website/source/docs/basics/terminology.html.markdown @@ -1,54 +1,57 @@ --- -layout: "docs" -page_title: "Packer Terminology" -description: |- - There are a handful of terms used throughout the Packer documentation where the meaning may not be immediately obvious if you haven't used Packer before. Luckily, there are relatively few. This page documents all the terminology required to understand and use Packer. The terminology is in alphabetical order for easy referencing. ---- +description: | + There are a handful of terms used throughout the Packer documentation where the + meaning may not be immediately obvious if you haven't used Packer before. + Luckily, there are relatively few. This page documents all the terminology + required to understand and use Packer. The terminology is in alphabetical order + for easy referencing. +layout: docs +page_title: Packer Terminology +... # Packer Terminology -There are a handful of terms used throughout the Packer documentation where -the meaning may not be immediately obvious if you haven't used Packer before. +There are a handful of terms used throughout the Packer documentation where the +meaning may not be immediately obvious if you haven't used Packer before. Luckily, there are relatively few. This page documents all the terminology -required to understand and use Packer. The terminology is in alphabetical -order for easy referencing. +required to understand and use Packer. The terminology is in alphabetical order +for easy referencing. -- `Artifacts` are the results of a single build, and are usually a set of IDs -or files to represent a machine image. Every builder produces a single -artifact. As an example, in the case of the Amazon EC2 builder, the artifact is -a set of AMI IDs (one per region). 
For the VMware builder, the artifact is a -directory of files comprising the created virtual machine. +- `Artifacts` are the results of a single build, and are usually a set of IDs or + files to represent a machine image. Every builder produces a single artifact. + As an example, in the case of the Amazon EC2 builder, the artifact is a set of + AMI IDs (one per region). For the VMware builder, the artifact is a directory + of files comprising the created virtual machine. -- `Builds` are a single task that eventually produces an image for a single -platform. Multiple builds run in parallel. Example usage in a -sentence: "The Packer build produced an AMI to run our web application." -Or: "Packer is running the builds now for VMware, AWS, and VirtualBox." +- `Builds` are a single task that eventually produces an image for a + single platform. Multiple builds run in parallel. Example usage in a sentence: + "The Packer build produced an AMI to run our web application." Or: "Packer is + running the builds now for VMware, AWS, and VirtualBox." -- `Builders` are components of Packer that are able to create a machine -image for a single platform. Builders read in some configuration and use -that to run and generate a machine image. A builder is invoked as part of a -build in order to create the actual resulting images. Example builders include -VirtualBox, VMware, and Amazon EC2. Builders can be created and added to -Packer in the form of plugins. +- `Builders` are components of Packer that are able to create a machine image + for a single platform. Builders read in some configuration and use that to run + and generate a machine image. A builder is invoked as part of a build in order + to create the actual resulting images. Example builders include VirtualBox, + VMware, and Amazon EC2. Builders can be created and added to Packer in the + form of plugins. -- `Commands` are sub-commands for the `packer` program that perform some -job. 
An example command is "build", which is invoked as `packer build`. -Packer ships with a set of commands out of the box in order to define -its command-line interface. Commands can also be created and added to -Packer in the form of plugins. +- `Commands` are sub-commands for the `packer` program that perform some job. An + example command is "build", which is invoked as `packer build`. Packer ships + with a set of commands out of the box in order to define its + command-line interface. Commands can also be created and added to Packer in + the form of plugins. -- `Post-processors` are components of Packer that take the result of -a builder or another post-processor and process that to -create a new artifact. Examples of post-processors are -compress to compress artifacts, upload to upload artifacts, etc. +- `Post-processors` are components of Packer that take the result of a builder + or another post-processor and process that to create a new artifact. Examples + of post-processors are compress to compress artifacts, upload to upload + artifacts, etc. -- `Provisioners` are components of Packer that install and configure -software within a running machine prior to that machine being turned -into a static image. They perform the major work of making the image contain -useful software. Example provisioners include shell scripts, Chef, Puppet, -etc. +- `Provisioners` are components of Packer that install and configure software + within a running machine prior to that machine being turned into a + static image. They perform the major work of making the image contain + useful software. Example provisioners include shell scripts, Chef, + Puppet, etc. -- `Templates` are JSON files which define one or more builds -by configuring the various components of Packer. Packer is able to read a -template and use that information to create multiple machine images in -parallel. 
+- `Templates` are JSON files which define one or more builds by configuring the + various components of Packer. Packer is able to read a template and use that + information to create multiple machine images in parallel. diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index d6b61ca8b..c3e16a982 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -1,49 +1,52 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (chroot)" -description: |- - The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an EBS volume as the root device. For more information on the difference between instance storage and EBS-backed instances, storage for the root device section in the EC2 documentation. ---- +description: | + The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an + EBS volume as the root device. For more information on the difference between + instance storage and EBS-backed instances, storage for the root device section + in the EC2 documentation. +layout: docs +page_title: 'Amazon AMI Builder (chroot)' +... # AMI Builder (chroot) Type: `amazon-chroot` -The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by -an EBS volume as the root device. For more information on the difference -between instance storage and EBS-backed instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an +EBS volume as the root device. 
For more information on the difference between +instance storage and EBS-backed instances, see the ["storage for the root +device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). -The difference between this builder and the `amazon-ebs` builder is that -this builder is able to build an EBS-backed AMI without launching a new -EC2 instance. This can dramatically speed up AMI builds for organizations -who need the extra fast build. +The difference between this builder and the `amazon-ebs` builder is that this +builder is able to build an EBS-backed AMI without launching a new EC2 instance. +This can dramatically speed up AMI builds for organizations who need the extra +fast build. -~> **This is an advanced builder** If you're just getting -started with Packer, we recommend starting with the -[amazon-ebs builder](/docs/builders/amazon-ebs.html), which is -much easier to use. +\~> **This is an advanced builder** If you're just getting started with +Packer, we recommend starting with the [amazon-ebs +builder](/docs/builders/amazon-ebs.html), which is much easier to use. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. ## How Does it Work? -This builder works by creating a new EBS volume from an existing source AMI -and attaching it into an already-running EC2 instance. Once attached, a -[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the -system within that volume. After provisioning, the volume is detached, -snapshotted, and an AMI is made. +This builder works by creating a new EBS volume from an existing source AMI and +attaching it into an already-running EC2 instance. 
Once attached, a +[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the system +within that volume. After provisioning, the volume is detached, snapshotted, and +an AMI is made. -Using this process, minutes can be shaved off the AMI creation process -because a new EC2 instance doesn't need to be launched. +Using this process, minutes can be shaved off the AMI creation process because a +new EC2 instance doesn't need to be launched. -There are some restrictions, however. The host EC2 instance where the -volume is attached to must be a similar system (generally the same OS -version, kernel versions, etc.) as the AMI being built. Additionally, -this process is much more expensive because the EC2 instance must be kept -running persistently in order to build AMIs, whereas the other AMI builders -start instances on-demand to build AMIs as needed. +There are some restrictions, however. The host EC2 instance where the volume is +attached to must be a similar system (generally the same OS version, kernel +versions, etc.) as the AMI being built. Additionally, this process is much more +expensive because the EC2 instance must be kept running persistently in order to +build AMIs, whereas the other AMI builders start instances on-demand to build +AMIs as needed. ## Configuration Reference @@ -52,107 +55,109 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. 
- If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. 
- If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -* `source_ami` (string) - The source AMI whose root volume will be copied - and provisioned on the currently running instance. This must be an - EBS-backed AMI with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed AMI + with a root volume snapshot that you have access to. ### Optional: -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. -* `ami_product_codes` (array of strings) - A list of product codes to - associate with the AMI. By default no product codes are associated with - the AMI. 
+- `ami_product_codes` (array of strings) - A list of product codes to associate + with the AMI. By default no product codes are associated with the AMI. -* `ami_regions` (array of strings) - A list of regions to copy the AMI to. - Tags and attributes are copied along with the AMI. AMI copying takes time - depending on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags + and attributes are copied along with the AMI. AMI copying takes time depending + on the size of the AMI, but will generally take many minutes. -* `ami_users` (array of strings) - A list of account IDs that have access - to launch the resulting AMI(s). By default no additional users other than the user - creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -* `ami_virtualization_type` (string) - The type of virtualization for the AMI +- `ami_virtualization_type` (string) - The type of virtualization for the AMI you are building. This option is required to register HVM images. Can be "paravirtual" (default) or "hvm". -* `chroot_mounts` (array of array of strings) - This is a list of additional +- `chroot_mounts` (array of array of strings) - This is a list of additional devices to mount into the chroot environment. This configuration parameter - requires some additional documentation which is in the "Chroot Mounts" section - below. Please read that section for more information on how to use this. + requires some additional documentation which is in the "Chroot Mounts" + section below. Please read that section for more information on how to + use this. -* `command_wrapper` (string) - How to run shell commands. This - defaults to "{{.Command}}". 
This may be useful to set if you want to set - environmental variables or perhaps run it with `sudo` or so on. This is a - configuration template where the `.Command` variable is replaced with the - command to be run. +- `command_wrapper` (string) - How to run shell commands. This defaults + to "{{.Command}}". This may be useful to set if you want to set environmental + variables or perhaps run it with `sudo` or so on. This is a configuration + template where the `.Command` variable is replaced with the command to be run. -* `copy_files` (array of strings) - Paths to files on the running EC2 instance - that will be copied into the chroot environment prior to provisioning. - This is useful, for example, to copy `/etc/resolv.conf` so that DNS lookups - work. +- `copy_files` (array of strings) - Paths to files on the running EC2 instance + that will be copied into the chroot environment prior to provisioning. This is + useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. -* `device_path` (string) - The path to the device where the root volume - of the source AMI will be attached. This defaults to "" (empty string), - which forces Packer to find an open device automatically. +- `device_path` (string) - The path to the device where the root volume of the + source AMI will be attached. This defaults to "" (empty string), which forces + Packer to find an open device automatically. -* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) + on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS + IAM policy. -* `force_deregister` (boolean) - Force Packer to first deregister an existing -AMI if one with the same name already exists. Default `false`. 
+- `force_deregister` (boolean) - Force Packer to first deregister an existing
+  AMI if one with the same name already exists. Default `false`.
 
-* `mount_path` (string) - The path where the volume will be mounted. This is
+- `mount_path` (string) - The path where the volume will be mounted. This is
   where the chroot environment will be. This defaults to
-  `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration
-  template where the `.Device` variable is replaced with the name of the
-  device where the volume is attached.
+  `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template
+  where the `.Device` variable is replaced with the name of the device where the
+  volume is attached.
 
-* `mount_options` (array of strings) - Options to supply the `mount` command
-when mounting devices. Each option will be prefixed with `-o ` and supplied to
-the `mount` command ran by Packer. Because this command is ran in a shell, user
-discrestion is advised. See [this manual page for the mount command][1] for valid
-file system specific options
+- `mount_options` (array of strings) - Options to supply the `mount` command
+  when mounting devices. Each option will be prefixed with `-o` and supplied to
+  the `mount` command run by Packer. Because this command is run in a shell,
+  user discretion is advised. See [this manual page for the mount
+  command](http://linuxcommand.org/man_pages/mount8.html) for valid file system
+  specific options
 
-* `root_volume_size` (integer) - The size of the root volume for the chroot
-environment, and the resulting AMI
+- `root_volume_size` (integer) - The size of the root volume for the chroot
+  environment, and the resulting AMI
 
-* `tags` (object of key/value strings) - Tags applied to the AMI.
+- `tags` (object of key/value strings) - Tags applied to the AMI.
 
 ## Basic Example
 
 Here is a basic example. 
It is completely valid except for the access keys:
 
-```javascript
+``` {.javascript}
 {
   "type": "amazon-chroot",
   "access_key": "YOUR KEY HERE",
@@ -164,21 +169,21 @@ Here is a basic example. It is completely valid except for the access keys:
 
 ## Chroot Mounts
 
-The `chroot_mounts` configuration can be used to mount additional devices
-within the chroot. By default, the following additional mounts are added
-into the chroot by Packer:
+The `chroot_mounts` configuration can be used to mount additional devices within
+the chroot. By default, the following additional mounts are added into the
+chroot by Packer:
 
-* `/proc` (proc)
-* `/sys` (sysfs)
-* `/dev` (bind to real `/dev`)
-* `/dev/pts` (devpts)
-* `/proc/sys/fs/binfmt_misc` (binfmt_misc)
+- `/proc` (proc)
+- `/sys` (sysfs)
+- `/dev` (bind to real `/dev`)
+- `/dev/pts` (devpts)
+- `/proc/sys/fs/binfmt_misc` (binfmt\_misc)
 
-These default mounts are usually good enough for anyone and are sane
-defaults. However, if you want to change or add the mount points, you may
-using the `chroot_mounts` configuration. Here is an example configuration:
+These default mounts are usually good enough for anyone and are sane defaults.
+However, if you want to change or add the mount points, you may use the
+`chroot_mounts` configuration. Here is an example configuration:
 
-```javascript
+``` {.javascript}
 {
   "chroot_mounts": [
     ["proc", "proc", "/proc"],
@@ -187,25 +192,25 @@ using the `chroot_mounts` configuration. Here is an example configuration:
 }
 ```
 
-`chroot_mounts` is a list of a 3-tuples of strings. The three components
-of the 3-tuple, in order, are:
+`chroot_mounts` is a list of 3-tuples of strings. The three components of the
+3-tuple, in order, are:
 
-* The filesystem type. If this is "bind", then Packer will properly bind
-  the filesystem to another mount point.
+- The filesystem type. If this is "bind", then Packer will properly bind the
+  filesystem to another mount point.
 
-* The source device.
+- The source device. 
-* The mount directory. +- The mount directory. ## Parallelism -A quick note on parallelism: it is perfectly safe to run multiple -_separate_ Packer processes with the `amazon-chroot` builder on the same -EC2 instance. In fact, this is recommended as a way to push the most performance -out of your AMI builds. +A quick note on parallelism: it is perfectly safe to run multiple *separate* +Packer processes with the `amazon-chroot` builder on the same EC2 instance. In +fact, this is recommended as a way to push the most performance out of your AMI +builds. -Packer properly obtains a process lock for the parallelism-sensitive parts -of its internals such as finding an available device. +Packer properly obtains a process lock for the parallelism-sensitive parts of +its internals such as finding an available device. ## Gotchas @@ -213,10 +218,12 @@ One of the difficulties with using the chroot builder is that your provisioning scripts must not leave any processes running or packer will be unable to unmount the filesystem. 
-For debian based distributions you can setup a [policy-rc.d](http://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt) file which will -prevent packages installed by your provisioners from starting services: +For debian based distributions you can setup a +[policy-rc.d](http://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt) +file which will prevent packages installed by your provisioners from starting +services: -```javascript +``` {.javascript} { "type": "shell", "inline": [ @@ -235,6 +242,3 @@ prevent packages installed by your provisioners from starting services: ] } ``` - - -[1]: http://linuxcommand.org/man_pages/mount8.html diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 69a9a5c04..cb6b7c9d5 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -1,29 +1,32 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (EBS backed)" -description: |- - The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS volumes for use in EC2. For more information on the difference between EBS-backed instances and instance-store backed instances, see the storage for the root device section in the EC2 documentation. ---- +description: | + The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS + volumes for use in EC2. For more information on the difference between + EBS-backed instances and instance-store backed instances, see the storage for + the root device section in the EC2 documentation. +layout: docs +page_title: 'Amazon AMI Builder (EBS backed)' +... # AMI Builder (EBS backed) Type: `amazon-ebs` The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS -volumes for use in [EC2](http://aws.amazon.com/ec2/). 
For more information -on the difference between EBS-backed instances and instance-store backed -instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +volumes for use in [EC2](http://aws.amazon.com/ec2/). For more information on +the difference between EBS-backed instances and instance-store backed instances, +see the ["storage for the root device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). This builder builds an AMI by launching an EC2 instance from a source AMI, provisioning that running machine, and then creating an AMI from that machine. This is all done in your own AWS account. The builder will create temporary -keypairs, security group rules, etc. that provide it temporary access to -the instance while the image is being created. This simplifies configuration -quite a bit. +keypairs, security group rules, etc. that provide it temporary access to the +instance while the image is being created. This simplifies configuration quite a +bit. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. ## Configuration Reference @@ -32,170 +35,173 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. 
- If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. 
- If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -* `source_ami` (string) - The initial AMI used as a base for the newly +- `source_ami` (string) - The initial AMI used as a base for the newly created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over SSH + to the running machine. ### Optional: -* `ami_block_device_mappings` (array of block device mappings) - Add the block +- `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh") - `virtual_name` (string) - The virtual device name. See the documentation on - [Block Device Mapping][1] for more information + [Block Device + Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) + for more information - `snapshot_id` (string) - The ID of the snapshot - `volume_type` (string) - The volume type. 
gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - volumes + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes - `volume_size` (integer) - The size of the volume, in GiB. Required if not - specifying a `snapshot_id` + specifying a `snapshot_id` - `delete_on_termination` (boolean) - Indicates whether the EBS volume is - deleted on instance termination + deleted on instance termination - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - `no_device` (boolean) - Suppresses the specified device included in the - block device mapping of the AMI + block device mapping of the AMI - `iops` (integer) - The number of I/O operations per second (IOPS) that the - volume supports. See the documentation on [IOPs][2] for more information + volume supports. See the documentation on + [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) + for more information +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. AWS currently doesn't accept any + value other than "all". -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_product_codes` (array of strings) - A list of product codes to associate + with the AMI. By default no product codes are associated with the AMI. -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. - AWS currently doesn't accept any value other than "all". 
+- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags + and attributes are copied along with the AMI. AMI copying takes time depending + on the size of the AMI, but will generally take many minutes. -* `ami_product_codes` (array of strings) - A list of product codes to - associate with the AMI. By default no product codes are associated with - the AMI. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -* `ami_regions` (array of strings) - A list of regions to copy the AMI to. - Tags and attributes are copied along with the AMI. AMI copying takes time - depending on the size of the AMI, but will generally take many minutes. - -* `ami_users` (array of strings) - A list of account IDs that have access - to launch the resulting AMI(s). By default no additional users other than the user - creating the AMI has permissions to launch it. - -* `associate_public_ip_address` (boolean) - If using a non-default VPC, public +- `associate_public_ip_address` (boolean) - If using a non-default VPC, public IP addresses are not provided by default. If this is toggled, your new instance will get a Public IP. -* `availability_zone` (string) - Destination availability zone to launch instance in. - Leave this empty to allow Amazon to auto-assign. +- `availability_zone` (string) - Destination availability zone to launch + instance in. Leave this empty to allow Amazon to auto-assign. -* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) + on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS + IAM policy. 
-* `force_deregister` (boolean) - Force Packer to first deregister an existing -AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -* `iam_instance_profile` (string) - The name of an - [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) to launch the EC2 instance with. -* `launch_block_device_mappings` (array of block device mappings) - Add the +- `launch_block_device_mappings` (array of block device mappings) - Add the block device mappings to the launch instance. The block device mappings are the same as `ami_block_device_mappings` above. -* `run_tags` (object of key/value strings) - Tags to apply to the instance - that is _launched_ to create the AMI. These tags are _not_ applied to - the resulting AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance that + is *launched* to create the AMI. These tags are *not* applied to the resulting + AMI unless they're duplicated in `tags`. -* `security_group_id` (string) - The ID (_not_ the name) of the security - group to assign to the instance. By default this is not set and Packer - will automatically create a new temporary security group to allow SSH - access. Note that if this is specified, you must be sure the security - group allows access to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. Note + that if this is specified, you must be sure the security group allows access + to the `ssh_port` given below. 
-* `security_group_ids` (array of strings) - A list of security groups as +- `security_group_ids` (array of strings) - A list of security groups as described above. Note that if this is specified, you must omit the `security_group_id`. -* `spot_price` (string) - The maximum hourly price to pay for a spot instance - to create the AMI. Spot instances are a type of instance that EC2 starts when - the current spot price is less than the maximum price you specify. Spot price - will be updated based on available spot instance capacity and current spot +- `spot_price` (string) - The maximum hourly price to pay for a spot instance to + create the AMI. Spot instances are a type of instance that EC2 starts when the + current spot price is less than the maximum price you specify. Spot price will + be updated based on available spot instance capacity and current spot instance requests. It may save you some costs. You can set this to "auto" for Packer to automatically discover the best spot price. -* `spot_price_auto_product` (string) - Required if `spot_price` is set to - "auto". This tells Packer what sort of AMI you're launching to find the best - spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, - `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +- `spot_price_auto_product` (string) - Required if `spot_price` is set + to "auto". This tells Packer what sort of AMI you're launching to find the + best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, + `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` -* `ssh_keypair_name` (string) - If specified, this is the key that will be - used for SSH with the machine. By default, this is blank, and Packer will - generate a temporary keypair. `ssh_private_key_file` must be specified - with this. +- `ssh_keypair_name` (string) - If specified, this is the key that will be used + for SSH with the machine. 
By default, this is blank, and Packer will generate + a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_private_ip` (boolean) - If true, then SSH will always use the private - IP if available. +- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP + if available. -* `subnet_id` (string) - If using VPC, the ID of the subnet, such as +- `subnet_id` (string) - If using VPC, the ID of the subnet, such as "subnet-12345def", where Packer will launch the EC2 instance. This field is required if you are using an non-default VPC. -* `tags` (object of key/value strings) - Tags applied to the AMI and +- `tags` (object of key/value strings) - Tags applied to the AMI and relevant snapshots. -* `temporary_key_pair_name` (string) - The name of the temporary keypair +- `temporary_key_pair_name` (string) - The name of the temporary keypair to generate. By default, Packer generates a name with a UUID. -* `token` (string) - The access token to use. This is different from - the access key and secret key. If you're not sure what this is, then you - probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN` +- `token` (string) - The access token to use. This is different from the access + key and secret key. If you're not sure what this is, then you probably don't + need it. This will also be read from the `AWS_SECURITY_TOKEN` environmental variable. -* `user_data` (string) - User data to apply when launching the instance. - Note that you need to be careful about escaping characters due to the - templates being JSON. It is often more convenient to use `user_data_file`, - instead. +- `user_data` (string) - User data to apply when launching the instance. Note + that you need to be careful about escaping characters due to the templates + being JSON. It is often more convenient to use `user_data_file`, instead. 
-* `user_data_file` (string) - Path to a file that will be used for the - user data when launching the instance. +- `user_data_file` (string) - Path to a file that will be used for the user data + when launching the instance. -* `vpc_id` (string) - If launching into a VPC subnet, Packer needs the - VPC ID in order to create a temporary security group within the VPC. +- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in + order to create a temporary security group within the VPC. -* `windows_password_timeout` (string) - The timeout for waiting for - a Windows password for Windows instances. Defaults to 20 minutes. - Example value: "10m" +- `windows_password_timeout` (string) - The timeout for waiting for a Windows + password for Windows instances. Defaults to 20 minutes. Example value: "10m" ## Basic Example Here is a basic example. It is completely valid except for the access keys: -```javascript +``` {.javascript} { "type": "amazon-ebs", "access_key": "YOUR KEY HERE", @@ -208,25 +214,23 @@ Here is a basic example. It is completely valid except for the access keys: } ``` --> **Note:** Packer can also read the access key and secret -access key from environmental variables. See the configuration reference in -the section above for more information on what environmental variables Packer -will look for. +-> **Note:** Packer can also read the access key and secret access key from +environmental variables. See the configuration reference in the section above +for more information on what environmental variables Packer will look for. ## Accessing the Instance to Debug If you need to access the instance to debug for some reason, run the builder -with the `-debug` flag. In debug mode, the Amazon builder will save the -private key in the current directory and will output the DNS or IP information -as well. You can use this information to access the instance as it is -running. +with the `-debug` flag. 
In debug mode, the Amazon builder will save the private +key in the current directory and will output the DNS or IP information as well. +You can use this information to access the instance as it is running. ## AMI Block Device Mappings Example Here is an example using the optional AMI block device mappings. This will add the /dev/sdb and /dev/sdc block device mappings to the finished AMI. -```javascript +``` {.javascript} { "type": "amazon-ebs", "access_key": "YOUR KEY HERE", @@ -252,9 +256,9 @@ the /dev/sdb and /dev/sdc block device mappings to the finished AMI. ## Tag Example Here is an example using the optional AMI tags. This will add the tags -"OS_Version" and "Release" to the finished AMI. +"OS\_Version" and "Release" to the finished AMI. -```javascript +``` {.javascript} { "type": "amazon-ebs", "access_key": "YOUR KEY HERE", @@ -271,13 +275,10 @@ Here is an example using the optional AMI tags. This will add the tags } ``` --> **Note:** Packer uses pre-built AMIs as the source for building images. +-> **Note:** Packer uses pre-built AMIs as the source for building images. These source AMIs may include volumes that are not flagged to be destroyed on termiation of the instance building the new image. Packer will attempt to clean up all residual volumes that are not designated by the user to remain after termination. If you need to preserve those source volumes, you can overwrite the termination setting by specifying `delete_on_termination=false` in the `launch_device_mappings` block for the device. 
- -[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html -[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index fa3c8a190..5ff36ccf2 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -1,9 +1,12 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (instance-store)" -description: |- - The `amazon-instance` Packer builder is able to create Amazon AMIs backed by instance storage as the root device. For more information on the difference between instance storage and EBS-backed instances, see the storage for the root device section in the EC2 documentation. ---- +description: | + The `amazon-instance` Packer builder is able to create Amazon AMIs backed by + instance storage as the root device. For more information on the difference + between instance storage and EBS-backed instances, see the storage for the root + device section in the EC2 documentation. +layout: docs +page_title: 'Amazon AMI Builder (instance-store)' +... # AMI Builder (instance-store) @@ -11,24 +14,24 @@ Type: `amazon-instance` The `amazon-instance` Packer builder is able to create Amazon AMIs backed by instance storage as the root device. For more information on the difference -between instance storage and EBS-backed instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +between instance storage and EBS-backed instances, see the ["storage for the +root device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). 
This builder builds an AMI by launching an EC2 instance from an existing instance-storage backed AMI, provisioning that running machine, and then -bundling and creating a new AMI from that machine. -This is all done in your own AWS account. The builder will create temporary -keypairs, security group rules, etc. that provide it temporary access to -the instance while the image is being created. This simplifies configuration -quite a bit. +bundling and creating a new AMI from that machine. This is all done in your own +AWS account. The builder will create temporary keypairs, security group rules, +etc. that provide it temporary access to the instance while the image is being +created. This simplifies configuration quite a bit. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. --> **Note** This builder requires that the -[Amazon EC2 AMI Tools](http://aws.amazon.com/developertools/368) -are installed onto the machine. This can be done within a provisioner, but -must be done before the builder finishes running. +-> **Note** This builder requires that the [Amazon EC2 AMI +Tools](http://aws.amazon.com/developertools/368) are installed onto the machine. +This can be done within a provisioner, but must be done before the builder +finishes running. ## Configuration Reference @@ -37,204 +40,207 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. 
- If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -* `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is _not the same_ as the access key. You can find your - account ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. 
+- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. - This bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -* `source_ami` (string) - The initial AMI used as a base for the newly +- `source_ami` (string) - The initial AMI used as a base for the newly created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over SSH + to the running machine. -* `x509_cert_path` (string) - The local path to a valid X509 certificate for +- `x509_cert_path` (string) - The local path to a valid X509 certificate for your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page - in the AWS console. + must be registered with your account from the security credentials page in the + AWS console. 
-* `x509_key_path` (string) - The local path to the private key for the X509 +- `x509_key_path` (string) - The local path to the private key for the X509 certificate specified by `x509_cert_path`. This is used for bundling the AMI. ### Optional: -* `ami_block_device_mappings` (array of block device mappings) - Add the block +- `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh") - `virtual_name` (string) - The virtual device name. See the documentation on - [Block Device Mapping][1] for more information + [Block Device + Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) + for more information - `snapshot_id` (string) - The ID of the snapshot - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - volumes + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes - `volume_size` (integer) - The size of the volume, in GiB. Required if not - specifying a `snapshot_id` + specifying a `snapshot_id` - `delete_on_termination` (boolean) - Indicates whether the EBS volume is - deleted on instance termination + deleted on instance termination - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - `no_device` (boolean) - Suppresses the specified device included in the - block device mapping of the AMI + block device mapping of the AMI - `iops` (integer) - The number of I/O operations per second (IOPS) that the - volume supports. See the documentation on [IOPs][2] for more information + volume supports. 
See the documentation on + [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) + for more information +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. AWS currently doesn't accept any + value other than "all". -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. - AWS currently doesn't accept any value other than "all". +- `ami_product_codes` (array of strings) - A list of product codes to associate + with the AMI. By default no product codes are associated with the AMI. -* `ami_product_codes` (array of strings) - A list of product codes to - associate with the AMI. By default no product codes are associated with - the AMI. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags + and attributes are copied along with the AMI. AMI copying takes time depending + on the size of the AMI, but will generally take many minutes. -* `ami_regions` (array of strings) - A list of regions to copy the AMI to. - Tags and attributes are copied along with the AMI. AMI copying takes time - depending on the size of the AMI, but will generally take many minutes. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -* `ami_users` (array of strings) - A list of account IDs that have access - to launch the resulting AMI(s). 
By default no additional users other than the user - creating the AMI has permissions to launch it. - -* `ami_virtualization_type` (string) - The type of virtualization for the AMI +- `ami_virtualization_type` (string) - The type of virtualization for the AMI you are building. This option is required to register HVM images. Can be "paravirtual" (default) or "hvm". -* `associate_public_ip_address` (boolean) - If using a non-default VPC, public +- `associate_public_ip_address` (boolean) - If using a non-default VPC, public IP addresses are not provided by default. If this is toggled, your new - instance will get a Public IP. + instance will get a Public IP. -* `availability_zone` (string) - Destination availability zone to launch instance in. - Leave this empty to allow Amazon to auto-assign. +- `availability_zone` (string) - Destination availability zone to launch + instance in. Leave this empty to allow Amazon to auto-assign. -* `bundle_destination` (string) - The directory on the running instance - where the bundled AMI will be saved prior to uploading. By default this is - "/tmp". This directory must exist and be writable. +- `bundle_destination` (string) - The directory on the running instance where + the bundled AMI will be saved prior to uploading. By default this is "/tmp". + This directory must exist and be writable. -* `bundle_prefix` (string) - The prefix for files created from bundling - the root volume. By default this is "image-{{timestamp}}". The `timestamp` - variable should be used to make sure this is unique, otherwise it can - collide with other created AMIs by Packer in your account. +- `bundle_prefix` (string) - The prefix for files created from bundling the + root volume. By default this is "image-{{timestamp}}". The `timestamp` + variable should be used to make sure this is unique, otherwise it can collide + with other created AMIs by Packer in your account. 
-* `bundle_upload_command` (string) - The command to use to upload the - bundled volume. See the "custom bundle commands" section below for more - information. +- `bundle_upload_command` (string) - The command to use to upload the + bundled volume. See the "custom bundle commands" section below for + more information. -* `bundle_vol_command` (string) - The command to use to bundle the volume. - See the "custom bundle commands" section below for more information. +- `bundle_vol_command` (string) - The command to use to bundle the volume. See + the "custom bundle commands" section below for more information. -* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) + on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS + IAM policy. -* `force_deregister` (boolean) - Force Packer to first deregister an existing -AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -* `iam_instance_profile` (string) - The name of an - [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) to launch the EC2 instance with. -* `launch_block_device_mappings` (array of block device mappings) - Add the +- `launch_block_device_mappings` (array of block device mappings) - Add the block device mappings to the launch instance. The block device mappings are the same as `ami_block_device_mappings` above. -* `run_tags` (object of key/value strings) - Tags to apply to the instance - that is _launched_ to create the AMI. 
These tags are _not_ applied to - the resulting AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance that + is *launched* to create the AMI. These tags are *not* applied to the resulting + AMI unless they're duplicated in `tags`. -* `security_group_id` (string) - The ID (_not_ the name) of the security - group to assign to the instance. By default this is not set and Packer - will automatically create a new temporary security group to allow SSH - access. Note that if this is specified, you must be sure the security - group allows access to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. Note + that if this is specified, you must be sure the security group allows access + to the `ssh_port` given below. -* `security_group_ids` (array of strings) - A list of security groups as +- `security_group_ids` (array of strings) - A list of security groups as described above. Note that if this is specified, you must omit the `security_group_id`. -* `spot_price` (string) - The maximum hourly price to launch a spot instance - to create the AMI. It is a type of instances that EC2 starts when the maximum +- `spot_price` (string) - The maximum hourly price to launch a spot instance to + create the AMI. It is a type of instances that EC2 starts when the maximum price that you specify exceeds the current spot price. Spot price will be - updated based on available spot instance capacity and current spot Instance - requests. It may save you some costs. You can set this to "auto" for + updated based on available spot instance capacity and current spot + Instance requests. It may save you some costs. You can set this to "auto" for Packer to automatically discover the best spot price. 
-* `spot_price_auto_product` (string) - Required if `spot_price` is set to - "auto". This tells Packer what sort of AMI you're launching to find the best - spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, - `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +- `spot_price_auto_product` (string) - Required if `spot_price` is set + to "auto". This tells Packer what sort of AMI you're launching to find the + best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, + `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` -* `ssh_keypair_name` (string) - If specified, this is the key that will be - used for SSH with the machine. By default, this is blank, and Packer will - generate a temporary keypair. `ssh_private_key_file` must be specified - with this. +- `ssh_keypair_name` (string) - If specified, this is the key that will be used + for SSH with the machine. By default, this is blank, and Packer will generate + a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_private_ip` (boolean) - If true, then SSH will always use the private - IP if available. +- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP + if available. -* `subnet_id` (string) - If using VPC, the ID of the subnet, such as +- `subnet_id` (string) - If using VPC, the ID of the subnet, such as "subnet-12345def", where Packer will launch the EC2 instance. This field is required if you are using an non-default VPC. -* `tags` (object of key/value strings) - Tags applied to the AMI. +- `tags` (object of key/value strings) - Tags applied to the AMI. -* `temporary_key_pair_name` (string) - The name of the temporary keypair +- `temporary_key_pair_name` (string) - The name of the temporary keypair to generate. By default, Packer generates a name with a UUID. -* `user_data` (string) - User data to apply when launching the instance. 
- Note that you need to be careful about escaping characters due to the - templates being JSON. It is often more convenient to use `user_data_file`, - instead. +- `user_data` (string) - User data to apply when launching the instance. Note + that you need to be careful about escaping characters due to the templates + being JSON. It is often more convenient to use `user_data_file`, instead. -* `user_data_file` (string) - Path to a file that will be used for the - user data when launching the instance. +- `user_data_file` (string) - Path to a file that will be used for the user data + when launching the instance. -* `vpc_id` (string) - If launching into a VPC subnet, Packer needs the - VPC ID in order to create a temporary security group within the VPC. +- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in + order to create a temporary security group within the VPC. -* `x509_upload_path` (string) - The path on the remote machine where the - X509 certificate will be uploaded. This path must already exist and be - writable. X509 certificates are uploaded after provisioning is run, so - it is perfectly okay to create this directory as part of the provisioning - process. +- `x509_upload_path` (string) - The path on the remote machine where the X509 + certificate will be uploaded. This path must already exist and be writable. + X509 certificates are uploaded after provisioning is run, so it is perfectly + okay to create this directory as part of the provisioning process. -* `windows_password_timeout` (string) - The timeout for waiting for - a Windows password for Windows instances. Defaults to 20 minutes. - Example value: "10m" +- `windows_password_timeout` (string) - The timeout for waiting for a Windows + password for Windows instances. Defaults to 20 minutes. Example value: "10m" ## Basic Example Here is a basic example. 
It is completely valid except for the access keys: -```javascript +``` {.javascript} { "type": "amazon-instance", "access_key": "YOUR KEY HERE", @@ -254,84 +260,79 @@ Here is a basic example. It is completely valid except for the access keys: } ``` --> **Note:** Packer can also read the access key and secret -access key from environmental variables. See the configuration reference in -the section above for more information on what environmental variables Packer -will look for. +-> **Note:** Packer can also read the access key and secret access key from +environmental variables. See the configuration reference in the section above +for more information on what environmental variables Packer will look for. ## Accessing the Instance to Debug If you need to access the instance to debug for some reason, run the builder -with the `-debug` flag. In debug mode, the Amazon builder will save the -private key in the current directory and will output the DNS or IP information -as well. You can use this information to access the instance as it is -running. +with the `-debug` flag. In debug mode, the Amazon builder will save the private +key in the current directory and will output the DNS or IP information as well. +You can use this information to access the instance as it is running. ## Custom Bundle Commands -A lot of the process required for creating an instance-store backed AMI -involves commands being run on the actual source instance. Specifically, the -`ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle -the root filesystem and upload it, respectively. +A lot of the process required for creating an instance-store backed AMI involves +commands being run on the actual source instance. Specifically, the +`ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle the +root filesystem and upload it, respectively. Each of these commands have a lot of available flags. 
Instead of exposing each -possible flag as a template configuration option, the instance-store AMI -builder for Packer lets you customize the entire command used to bundle -and upload the AMI. +possible flag as a template configuration option, the instance-store AMI builder +for Packer lets you customize the entire command used to bundle and upload the +AMI. -These are configured with `bundle_vol_command` and `bundle_upload_command`. -Both of these configurations are -[configuration templates](/docs/templates/configuration-templates.html) -and have support for their own set of template variables. +These are configured with `bundle_vol_command` and `bundle_upload_command`. Both +of these configurations are [configuration +templates](/docs/templates/configuration-templates.html) and have support for +their own set of template variables. ### Bundle Volume Command -The default value for `bundle_vol_command` is shown below. It is split -across multiple lines for convenience of reading. The bundle volume command -is responsible for executing `ec2-bundle-vol` in order to store and image -of the root filesystem to use to create the AMI. +The default value for `bundle_vol_command` is shown below. It is split across +multiple lines for convenience of reading. The bundle volume command is +responsible for executing `ec2-bundle-vol` in order to store and image of the +root filesystem to use to create the AMI. -```text +``` {.text} sudo -i -n ec2-bundle-vol \ - -k {{.KeyPath}} \ - -u {{.AccountId}} \ - -c {{.CertPath}} \ - -r {{.Architecture}} \ - -e {{.PrivatePath}}/* \ - -d {{.Destination}} \ - -p {{.Prefix}} \ - --batch \ - --no-filter + -k {{.KeyPath}} \ + -u {{.AccountId}} \ + -c {{.CertPath}} \ + -r {{.Architecture}} \ + -e {{.PrivatePath}}/* \ + -d {{.Destination}} \ + -p {{.Prefix}} \ + --batch \ + --no-filter ``` The available template variables should be self-explanatory based on the parameters they're used to satisfy the `ec2-bundle-vol` command. 
-~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and +\~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and .gpg files during the bundling of the AMI, which can cause problems on some -systems, such as Ubuntu. You may want to customize the bundle volume command -to include those files (see the `--no-filter` option of ec2-bundle-vol). +systems, such as Ubuntu. You may want to customize the bundle volume command to +include those files (see the `--no-filter` option of ec2-bundle-vol). ### Bundle Upload Command -The default value for `bundle_upload_command` is shown below. It is split -across multiple lines for convenience of reading. The bundle upload command -is responsible for taking the bundled volume and uploading it to S3. +The default value for `bundle_upload_command` is shown below. It is split across +multiple lines for convenience of reading. The bundle upload command is +responsible for taking the bundled volume and uploading it to S3. -```text +``` {.text} sudo -i -n ec2-upload-bundle \ - -b {{.BucketName}} \ - -m {{.ManifestPath}} \ - -a {{.AccessKey}} \ - -s {{.SecretKey}} \ - -d {{.BundleDirectory}} \ - --batch \ - --region {{.Region}} \ - --retry + -b {{.BucketName}} \ + -m {{.ManifestPath}} \ + -a {{.AccessKey}} \ + -s {{.SecretKey}} \ + -d {{.BundleDirectory}} \ + --batch \ + --region {{.Region}} \ + --retry ``` The available template variables should be self-explanatory based on the parameters they're used to satisfy the `ec2-upload-bundle` command. 
- -[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html -[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index ad336ad1c..69b4e509b 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -1,44 +1,47 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder" -description: |- - Packer is able to create Amazon AMIs. To achieve this, Packer comes with multiple builders depending on the strategy you want to use to build the AMI. ---- +description: | + Packer is able to create Amazon AMIs. To achieve this, Packer comes with + multiple builders depending on the strategy you want to use to build the AMI. +layout: docs +page_title: Amazon AMI Builder +... # Amazon AMI Builder Packer is able to create Amazon AMIs. To achieve this, Packer comes with -multiple builders depending on the strategy you want to use to build the -AMI. Packer supports the following builders at the moment: +multiple builders depending on the strategy you want to use to build the AMI. +Packer supports the following builders at the moment: -* [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs - by launching a source AMI and re-packaging it into a new AMI after - provisioning. If in doubt, use this builder, which is the easiest to get - started with. +- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by + launching a source AMI and re-packaging it into a new AMI after provisioning. + If in doubt, use this builder, which is the easiest to get started with. -* [amazon-instance](/docs/builders/amazon-instance.html) - Create - instance-store AMIs by launching and provisioning a source instance, then - rebundling it and uploading it to S3. 
+- [amazon-instance](/docs/builders/amazon-instance.html) - Create instance-store + AMIs by launching and provisioning a source instance, then rebundling it and + uploading it to S3. -* [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs +- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs from an existing EC2 instance by mounting the root device and using a [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision that device. This is an **advanced builder and should not be used by - newcomers**. However, it is also the fastest way to build an EBS-backed - AMI since no new EC2 instance needs to be launched. + newcomers**. However, it is also the fastest way to build an EBS-backed AMI + since no new EC2 instance needs to be launched. --> **Don't know which builder to use?** If in doubt, use the -[amazon-ebs builder](/docs/builders/amazon-ebs.html). It is -much easier to use and Amazon generally recommends EBS-backed images nowadays. +-> **Don't know which builder to use?** If in doubt, use the [amazon-ebs +builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon +generally recommends EBS-backed images nowadays. ## Using an IAM Instance Profile -If AWS keys are not specified in the template, a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file or through environment variables -Packer will use credentials provided by the instance's IAM profile, if it has one. +If AWS keys are not specified in the template, a +[credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) +file or through environment variables Packer will use credentials provided by +the instance's IAM profile, if it has one. 
-The following policy document provides the minimal set permissions necessary for Packer to work: +The following policy document provides the minimal set permissions necessary for +Packer to work: -```javascript +``` {.javascript} { "Statement": [{ "Effect": "Allow", diff --git a/website/source/docs/builders/custom.html.markdown b/website/source/docs/builders/custom.html.markdown index a737c1bd1..dc6928d4b 100644 --- a/website/source/docs/builders/custom.html.markdown +++ b/website/source/docs/builders/custom.html.markdown @@ -1,13 +1,15 @@ --- -layout: "docs" -page_title: "Custom Builder" -description: |- - Packer is extensible, allowing you to write new builders without having to modify the core source code of Packer itself. Documentation for creating new builders is covered in the custom builders page of the Packer plugin section. ---- +description: | + Packer is extensible, allowing you to write new builders without having to + modify the core source code of Packer itself. Documentation for creating new + builders is covered in the custom builders page of the Packer plugin section. +layout: docs +page_title: Custom Builder +... # Custom Builder Packer is extensible, allowing you to write new builders without having to -modify the core source code of Packer itself. Documentation for creating -new builders is covered in the [custom builders](/docs/extend/builder.html) -page of the Packer plugin section. +modify the core source code of Packer itself. Documentation for creating new +builders is covered in the [custom builders](/docs/extend/builder.html) page of +the Packer plugin section. 
diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index c9ef3b315..b20523944 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -1,22 +1,26 @@ --- -layout: "docs" -page_title: "DigitalOcean Builder" -description: |- - The `digitalocean` Packer builder is able to create new images for use with DigitalOcean. The builder takes a source image, runs any provisioning necessary on the image after launching it, then snapshots it into a reusable image. This reusable image can then be used as the foundation of new servers that are launched within DigitalOcean. ---- +description: | + The `digitalocean` Packer builder is able to create new images for use with + DigitalOcean. The builder takes a source image, runs any provisioning necessary + on the image after launching it, then snapshots it into a reusable image. This + reusable image can then be used as the foundation of new servers that are + launched within DigitalOcean. +layout: docs +page_title: DigitalOcean Builder +... # DigitalOcean Builder Type: `digitalocean` The `digitalocean` Packer builder is able to create new images for use with -[DigitalOcean](http://www.digitalocean.com). The builder takes a source -image, runs any provisioning necessary on the image after launching it, -then snapshots it into a reusable image. This reusable image can then be -used as the foundation of new servers that are launched within DigitalOcean. +[DigitalOcean](http://www.digitalocean.com). The builder takes a source image, +runs any provisioning necessary on the image after launching it, then snapshots +it into a reusable image. This reusable image can then be used as the foundation +of new servers that are launched within DigitalOcean. -The builder does _not_ manage images. Once it creates an image, it is up to -you to use it or delete it. 
+The builder does *not* manage images. Once it creates an image, it is up to you
+to use it or delete it.

## Configuration Reference

@@ -25,50 +29,53 @@ segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.

In addition to the options listed here, a
-[communicator](/docs/templates/communicator.html)
-can be configured for this builder.
+[communicator](/docs/templates/communicator.html) can be configured for this
+builder.

### Required:

-* `api_token` (string) - The client TOKEN to use to access your account.
-  It can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set.
+- `api_token` (string) - The client TOKEN to use to access your account. It can
+  also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set.

-* `image` (string) - The name (or slug) of the base image to use. This is the
-  image that will be used to launch a new droplet and provision it.
-  See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the the accepted image names/slugs.
+- `image` (string) - The name (or slug) of the base image to use. This is the
+  image that will be used to launch a new droplet and provision it. See
+  https://developers.digitalocean.com/documentation/v2/\#list-all-images for
+  details on how to get a list of the accepted image names/slugs.

-* `region` (string) - The name (or slug) of the region to launch the droplet in.
-  Consequently, this is the region where the snapshot will be available.
-  See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs.
+- `region` (string) - The name (or slug) of the region to launch the droplet in.
+  Consequently, this is the region where the snapshot will be available. See
+  https://developers.digitalocean.com/documentation/v2/\#list-all-regions for
+  the accepted region names/slugs.
-* `size` (string) - The name (or slug) of the droplet size to use. - See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs. +- `size` (string) - The name (or slug) of the droplet size to use. See + https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for the + accepted size names/slugs. ### Optional: -* `droplet_name` (string) - The name assigned to the droplet. DigitalOcean - sets the hostname of the machine to this value. +- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean sets + the hostname of the machine to this value. -* `private_networking` (boolean) - Set to `true` to enable private networking +- `private_networking` (boolean) - Set to `true` to enable private networking for the droplet being created. This defaults to `false`, or not enabled. -* `snapshot_name` (string) - The name of the resulting snapshot that will - appear in your account. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `snapshot_name` (string) - The name of the resulting snapshot that will appear + in your account. This must be unique. To help make this unique, use a function + like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `state_timeout` (string) - The time to wait, as a duration string, - for a droplet to enter a desired state (such as "active") before - timing out. The default state timeout is "6m". +- `state_timeout` (string) - The time to wait, as a duration string, for a + droplet to enter a desired state (such as "active") before timing out. The + default state timeout is "6m". -* `user_data` (string) - User data to launch with the Droplet. +- `user_data` (string) - User data to launch with the Droplet. ## Basic Example -Here is a basic example. 
It is completely valid as soon as you enter your -own access tokens: +Here is a basic example. It is completely valid as soon as you enter your own +access tokens: -```javascript +``` {.javascript} { "type": "digitalocean", "api_token": "YOUR API KEY", diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index b5fe95075..b2fab5b19 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -1,39 +1,40 @@ --- -layout: "docs" -page_title: "Docker Builder" -description: |- - The `docker` Packer builder builds Docker images using Docker. The builder starts a Docker container, runs provisioners within this container, then exports the container for reuse or commits the image. ---- +description: | + The `docker` Packer builder builds Docker images using Docker. The builder + starts a Docker container, runs provisioners within this container, then exports + the container for reuse or commits the image. +layout: docs +page_title: Docker Builder +... # Docker Builder Type: `docker` The `docker` Packer builder builds [Docker](http://www.docker.io) images using -Docker. The builder starts a Docker container, runs provisioners within -this container, then exports the container for reuse or commits the image. +Docker. The builder starts a Docker container, runs provisioners within this +container, then exports the container for reuse or commits the image. -Packer builds Docker containers _without_ the use of -[Dockerfiles](https://docs.docker.com/reference/builder/). -By not using Dockerfiles, Packer is able to provision -containers with portable scripts or configuration management systems -that are not tied to Docker in any way. It also has a simpler mental model: -you provision containers much the same way you provision a normal virtualized -or dedicated server. For more information, read the section on -[Dockerfiles](#toc_8). 
+Packer builds Docker containers *without* the use of +[Dockerfiles](https://docs.docker.com/reference/builder/). By not using +Dockerfiles, Packer is able to provision containers with portable scripts or +configuration management systems that are not tied to Docker in any way. It also +has a simpler mental model: you provision containers much the same way you +provision a normal virtualized or dedicated server. For more information, read +the section on [Dockerfiles](#toc_8). The Docker builder must run on a machine that has Docker installed. Therefore the builder only works on machines that support Docker (modern Linux machines). -If you want to use Packer to build Docker containers on another platform, -use [Vagrant](http://www.vagrantup.com) to start a Linux environment, then -run Packer within that environment. +If you want to use Packer to build Docker containers on another platform, use +[Vagrant](http://www.vagrantup.com) to start a Linux environment, then run +Packer within that environment. ## Basic Example: Export -Below is a fully functioning example. It doesn't do anything useful, since -no provisioners are defined, but it will effectively repackage an image. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will effectively repackage an image. -```javascript +``` {.javascript} { "type": "docker", "image": "ubuntu", @@ -43,11 +44,11 @@ no provisioners are defined, but it will effectively repackage an image. ## Basic Example: Commit -Below is another example, the same as above but instead of exporting the -running container, this one commits the container to an image. The image -can then be more easily tagged, pushed, etc. +Below is another example, the same as above but instead of exporting the running +container, this one commits the container to an image. The image can then be +more easily tagged, pushed, etc. 
-```javascript +``` {.javascript} { "type": "docker", "image": "ubuntu", @@ -55,7 +56,6 @@ can then be more easily tagged, pushed, etc. } ``` - ## Configuration Reference Configuration options are organized below into two categories: required and @@ -63,47 +63,47 @@ optional. Within each category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `commit` (boolean) - If true, the container will be committed to an - image rather than exported. This cannot be set if `export_path` is set. +- `commit` (boolean) - If true, the container will be committed to an image + rather than exported. This cannot be set if `export_path` is set. -* `export_path` (string) - The path where the final container will be exported +- `export_path` (string) - The path where the final container will be exported as a tar file. This cannot be set if `commit` is set to true. -* `image` (string) - The base image for the Docker container that will - be started. This image will be pulled from the Docker registry if it - doesn't already exist. +- `image` (string) - The base image for the Docker container that will + be started. This image will be pulled from the Docker registry if it doesn't + already exist. ### Optional: -* `login` (boolean) - Defaults to false. If true, the builder will - login in order to pull the image. The builder only logs in for the - duration of the pull. It always logs out afterwards. +- `login` (boolean) - Defaults to false. If true, the builder will login in + order to pull the image. The builder only logs in for the duration of + the pull. It always logs out afterwards. -* `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use to authenticate to login. 
-* `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use to authenticate to login. -* `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use to authenticate to login. -* `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to login to. -* `pull` (boolean) - If true, the configured image will be pulled using - `docker pull` prior to use. Otherwise, it is assumed the image already - exists and can be used. This defaults to true if not set. +- `pull` (boolean) - If true, the configured image will be pulled using + `docker pull` prior to use. Otherwise, it is assumed the image already exists + and can be used. This defaults to true if not set. -* `run_command` (array of strings) - An array of arguments to pass to +- `run_command` (array of strings) - An array of arguments to pass to `docker run` in order to run the container. By default this is set to - `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. - As you can see, you have a couple template variables to customize, as well. + `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a + couple template variables to customize, as well. -* `volumes` (map of strings to strings) - A mapping of additional volumes - to mount into this container. The key of the object is the host path, - the value is the container path. +- `volumes` (map of strings to strings) - A mapping of additional volumes to + mount into this container. The key of the object is the host path, the value + is the container path. ## Using the Artifact: Export @@ -113,27 +113,26 @@ with the [docker-import](/docs/post-processors/docker-import.html) and [docker-push](/docs/post-processors/docker-push.html) post-processors. **Note:** This section is covering how to use an artifact that has been -_exported_. 
More specifically, if you set `export_path` in your configuration. +*exported*. More specifically, if you set `export_path` in your configuration. If you set `commit`, see the next section. -The example below shows a full configuration that would import and push -the created image. This is accomplished using a sequence definition (a -collection of post-processors that are treated as as single pipeline, see -[Post-Processors](/docs/templates/post-processors.html) -for more information): +The example below shows a full configuration that would import and push the +created image. This is accomplished using a sequence definition (a collection of +post-processors that are treated as as single pipeline, see +[Post-Processors](/docs/templates/post-processors.html) for more information): -```javascript +``` {.javascript} { "post-processors": [ - [ - { - "type": "docker-import", - "repository": "mitchellh/packer", - "tag": "0.7" - }, - "docker-push" - ] - ] + [ + { + "type": "docker-import", + "repository": "mitchellh/packer", + "tag": "0.7" + }, + "docker-push" + ] + ] } ``` @@ -143,10 +142,10 @@ post-processor which will import the artifact as a docker image. The resulting docker image is then passed on to the `docker-push` post-processor which handles pushing the image to a container repository. -If you want to do this manually, however, perhaps from a script, you can -import the image using the process below: +If you want to do this manually, however, perhaps from a script, you can import +the image using the process below: -```text +``` {.text} $ docker import - registry.mydomain.com/mycontainer:latest < artifact.tar ``` @@ -157,23 +156,22 @@ and `docker push`, respectively. If you committed your container to an image, you probably want to tag, save, push, etc. Packer can do this automatically for you. An example is shown below -which tags and pushes an image. 
This is accomplished using a sequence
-definition (a collection of post-processors that are treated as as single
-pipeline, see [Post-Processors](/docs/templates/post-processors.html) for more
-information):
+which tags and pushes an image. This is accomplished using a sequence definition
+(a collection of post-processors that are treated as a single pipeline, see
+[Post-Processors](/docs/templates/post-processors.html) for more information):

-```javascript
+``` {.javascript}
{
  "post-processors": [
-    [
-      {
-        "type": "docker-tag",
-        "repository": "mitchellh/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ]
-  ]
+  [
+    {
+      "type": "docker-tag",
+      "repository": "mitchellh/packer",
+      "tag": "0.7"
+    },
+    "docker-push"
+  ]
+  ]
}
```

@@ -187,52 +185,52 @@ Going a step further, if you wanted to tag and push an image to multiple
container repositories, this could be accomplished by defining two,
nearly-identical sequence definitions, as demonstrated by the example below:

-```javascript
+``` {.javascript}
{
-  "post-processors": [
-    [
-      {
-        "type": "docker-tag",
-        "repository": "mitchellh/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ],
-    [
-      {
-        "type": "docker-tag",
-        "repository": "hashicorp/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ]
-  ]
+  "post-processors": [
+  [
+    {
+      "type": "docker-tag",
+      "repository": "mitchellh/packer",
+      "tag": "0.7"
+    },
+    "docker-push"
+  ],
+  [
+    {
+      "type": "docker-tag",
+      "repository": "hashicorp/packer",
+      "tag": "0.7"
+    },
+    "docker-push"
+  ]
+  ]
}
```

## Dockerfiles

-This builder allows you to build Docker images _without_ Dockerfiles.
+This builder allows you to build Docker images *without* Dockerfiles.

-With this builder, you can repeatably create Docker images without the use of
-a Dockerfile. You don't need to know the syntax or semantics of Dockerfiles.
+With this builder, you can repeatably create Docker images without the use of a
+Dockerfile. You don't need to know the syntax or semantics of Dockerfiles.
Instead, you can just provide shell scripts, Chef recipes, Puppet manifests, etc. to provision your Docker container just like you would a regular virtualized or dedicated machine. -While Docker has many features, Packer views Docker simply as an LXC -container runner. To that end, Packer is able to repeatably build these -LXC containers using portable provisioning scripts. +While Docker has many features, Packer views Docker simply as an LXC container +runner. To that end, Packer is able to repeatably build these LXC containers +using portable provisioning scripts. -Dockerfiles have some additional features that Packer doesn't support -which are able to be worked around. Many of these features will be automated -by Packer in the future: +Dockerfiles have some additional features that Packer doesn't support which are +able to be worked around. Many of these features will be automated by Packer in +the future: -* Dockerfiles will snapshot the container at each step, allowing you to - go back to any step in the history of building. Packer doesn't do this yet, - but inter-step snapshotting is on the way. +- Dockerfiles will snapshot the container at each step, allowing you to go back + to any step in the history of building. Packer doesn't do this yet, but + inter-step snapshotting is on the way. -* Dockerfiles can contain information such as exposed ports, shared - volumes, and other metadata. Packer builds a raw Docker container image - that has none of this metadata. You can pass in much of this metadata - at runtime with `docker run`. +- Dockerfiles can contain information such as exposed ports, shared volumes, and + other metadata. Packer builds a raw Docker container image that has none of + this metadata. You can pass in much of this metadata at runtime with + `docker run`. 
diff --git a/website/source/docs/builders/null.html.markdown b/website/source/docs/builders/null.html.markdown index 7398cadd7..037165ba2 100644 --- a/website/source/docs/builders/null.html.markdown +++ b/website/source/docs/builders/null.html.markdown @@ -1,24 +1,28 @@ --- -layout: "docs" -page_title: "Null Builder" -description: |- - The `null` Packer builder is not really a builder, it just sets up an SSH connection and runs the provisioners. It can be used to debug provisioners without incurring high wait times. It does not create any kind of image or artifact. ---- +description: | + The `null` Packer builder is not really a builder, it just sets up an SSH + connection and runs the provisioners. It can be used to debug provisioners + without incurring high wait times. It does not create any kind of image or + artifact. +layout: docs +page_title: Null Builder +... # Null Builder Type: `null` -The `null` Packer builder is not really a builder, it just sets up an SSH connection -and runs the provisioners. It can be used to debug provisioners without -incurring high wait times. It does not create any kind of image or artifact. +The `null` Packer builder is not really a builder, it just sets up an SSH +connection and runs the provisioners. It can be used to debug provisioners +without incurring high wait times. It does not create any kind of image or +artifact. ## Basic Example -Below is a fully functioning example. It doesn't do anything useful, since -no provisioners are defined, but it will connect to the specified host via ssh. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will connect to the specified host via ssh. -```javascript +``` {.javascript} { "type": "null", "ssh_host": "127.0.0.1", @@ -31,4 +35,3 @@ no provisioners are defined, but it will connect to the specified host via ssh. 
The null builder has no configuration parameters other than the [communicator](/docs/templates/communicator.html) settings. - diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index fec1a85a6..409275c7b 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -1,25 +1,30 @@ --- -layout: "docs" -page_title: "OpenStack Builder" -description: |- - The `openstack` Packer builder is able to create new images for use with OpenStack. The builder takes a source image, runs any provisioning necessary on the image after launching it, then creates a new reusable image. This reusable image can then be used as the foundation of new servers that are launched within OpenStack. The builder will create temporary keypairs that provide temporary access to the server while the image is being created. This simplifies configuration quite a bit. ---- +description: | + The `openstack` Packer builder is able to create new images for use with + OpenStack. The builder takes a source image, runs any provisioning necessary on + the image after launching it, then creates a new reusable image. This reusable + image can then be used as the foundation of new servers that are launched within + OpenStack. The builder will create temporary keypairs that provide temporary + access to the server while the image is being created. This simplifies + configuration quite a bit. +layout: docs +page_title: OpenStack Builder +... # OpenStack Builder Type: `openstack` The `openstack` Packer builder is able to create new images for use with -[OpenStack](http://www.openstack.org). The builder takes a source -image, runs any provisioning necessary on the image after launching it, -then creates a new reusable image. This reusable image can then be -used as the foundation of new servers that are launched within OpenStack. 
-The builder will create temporary keypairs that provide temporary access to -the server while the image is being created. This simplifies configuration -quite a bit. +[OpenStack](http://www.openstack.org). The builder takes a source image, runs +any provisioning necessary on the image after launching it, then creates a new +reusable image. This reusable image can then be used as the foundation of new +servers that are launched within OpenStack. The builder will create temporary +keypairs that provide temporary access to the server while the image is being +created. This simplifies configuration quite a bit. -The builder does _not_ manage images. Once it creates an image, it is up to -you to use it or delete it. +The builder does *not* manage images. Once it creates an image, it is up to you +to use it or delete it. ## Configuration Reference @@ -28,81 +33,79 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `flavor` (string) - The ID, name, or full URL for the desired flavor for the +- `flavor` (string) - The ID, name, or full URL for the desired flavor for the server to be created. -* `image_name` (string) - The name of the resulting image. +- `image_name` (string) - The name of the resulting image. -* `source_image` (string) - The ID or full URL to the base image to use. - This is the image that will be used to launch a new server and provision it. - Unless you specify completely custom SSH settings, the source image must - have `cloud-init` installed so that the keypair gets assigned properly. +- `source_image` (string) - The ID or full URL to the base image to use. 
This is + the image that will be used to launch a new server and provision it. Unless + you specify completely custom SSH settings, the source image must have + `cloud-init` installed so that the keypair gets assigned properly. -* `username` (string) - The username used to connect to the OpenStack service. - If not specified, Packer will use the environment variable - `OS_USERNAME`, if set. +- `username` (string) - The username used to connect to the OpenStack service. + If not specified, Packer will use the environment variable `OS_USERNAME`, + if set. -* `password` (string) - The password used to connect to the OpenStack service. - If not specified, Packer will use the environment variables - `OS_PASSWORD`, if set. +- `password` (string) - The password used to connect to the OpenStack service. + If not specified, Packer will use the environment variables `OS_PASSWORD`, + if set. ### Optional: -* `api_key` (string) - The API key used to access OpenStack. Some OpenStack +- `api_key` (string) - The API key used to access OpenStack. Some OpenStack installations require this. -* `availability_zone` (string) - The availability zone to launch the - server in. If this isn't specified, the default enforced by your OpenStack - cluster will be used. This may be required for some OpenStack clusters. +- `availability_zone` (string) - The availability zone to launch the server in. + If this isn't specified, the default enforced by your OpenStack cluster will + be used. This may be required for some OpenStack clusters. -* `floating_ip` (string) - A specific floating IP to assign to this instance. +- `floating_ip` (string) - A specific floating IP to assign to this instance. `use_floating_ip` must also be set to true for this to have an affect. -* `floating_ip_pool` (string) - The name of the floating IP pool to use - to allocate a floating IP. `use_floating_ip` must also be set to true - for this to have an affect. 
+- `floating_ip_pool` (string) - The name of the floating IP pool to use to
+  allocate a floating IP. `use_floating_ip` must also be set to true for this to
+  have an effect.

-* `insecure` (boolean) - Whether or not the connection to OpenStack can be done
+- `insecure` (boolean) - Whether or not the connection to OpenStack can be done
   over an insecure connection. By default this is false.

-* `networks` (array of strings) - A list of networks by UUID to attach
-  to this instance.
+- `networks` (array of strings) - A list of networks by UUID to attach to
+  this instance.

-* `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
-  instance into. Some OpenStack installations require this.
-  If not specified, Packer will use the environment variable
-  `OS_TENANT_NAME`, if set.
+- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
+  instance into. Some OpenStack installations require this. If not specified,
+  Packer will use the environment variable `OS_TENANT_NAME`, if set.

-* `security_groups` (array of strings) - A list of security groups by name
-  to add to this instance.
+- `security_groups` (array of strings) - A list of security groups by name to
+  add to this instance.

-* `region` (string) - The name of the region, such as "DFW", in which
-  to launch the server to create the AMI.
-  If not specified, Packer will use the environment variable
-  `OS_REGION_NAME`, if set.
+- `region` (string) - The name of the region, such as "DFW", in which to launch
+  the server to create the AMI. If not specified, Packer will use the
+  environment variable `OS_REGION_NAME`, if set.

-* `ssh_interface` (string) - The type of interface to connect via SSH. Values
-  useful for Rackspace are "public" or "private", and the default behavior is
-  to connect via whichever is returned first from the OpenStack API.
+- `ssh_interface` (string) - The type of interface to connect via SSH. 
Values + useful for Rackspace are "public" or "private", and the default behavior is to + connect via whichever is returned first from the OpenStack API. -* `use_floating_ip` (boolean) - Whether or not to use a floating IP for +- `use_floating_ip` (boolean) - Whether or not to use a floating IP for the instance. Defaults to false. -* `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for +- `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for Rackconnect to assign the machine an IP address before connecting via SSH. Defaults to false. ## Basic Example: Rackspace public cloud -Here is a basic example. This is a working example to build a -Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering. +Here is a basic example. This is a working example to build a Ubuntu 12.04 LTS +(Precise Pangolin) on Rackspace OpenStack cloud offering. -```javascript +``` {.javascript} { "type": "openstack", "username": "foo", @@ -117,10 +120,10 @@ Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering. ## Basic Example: Private OpenStack cloud -This example builds an Ubuntu 14.04 image on a private OpenStack cloud, -powered by Metacloud. +This example builds an Ubuntu 14.04 image on a private OpenStack cloud, powered +by Metacloud. -```javascript +``` {.javascript} { "type": "openstack", "ssh_username": "root", @@ -130,12 +133,12 @@ powered by Metacloud. } ``` -In this case, the connection information for connecting to OpenStack -doesn't appear in the template. That is because I source a standard -OpenStack script with environment variables set before I run this. This -script is setting environment variables like: +In this case, the connection information for connecting to OpenStack doesn't +appear in the template. That is because I source a standard OpenStack script +with environment variables set before I run this. 
This script is setting +environment variables like: -* `OS_AUTH_URL` -* `OS_TENANT_ID` -* `OS_USERNAME` -* `OS_PASSWORD` +- `OS_AUTH_URL` +- `OS_TENANT_ID` +- `OS_USERNAME` +- `OS_PASSWORD` diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index f0192b301..d89b5394f 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -1,31 +1,31 @@ --- -layout: "docs" -page_title: "Parallels Builder (from an ISO)" -description: |- - The Parallels Packer builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format, starting from an ISO image. ---- +description: | + The Parallels Packer builder is able to create Parallels Desktop for Mac virtual + machines and export them in the PVM format, starting from an ISO image. +layout: docs +page_title: 'Parallels Builder (from an ISO)' +... # Parallels Builder (from an ISO) Type: `parallels-iso` -The Parallels Packer builder is able to create -[Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) virtual -machines and export them in the PVM format, starting from an -ISO image. +The Parallels Packer builder is able to create [Parallels Desktop for +Mac](http://www.parallels.com/products/desktop/) virtual machines and export +them in the PVM format, starting from an ISO image. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. The result of the Parallels builder is a directory -containing all the files necessary to run the virtual machine portably. +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, then +shutting it down. 
The result of the Parallels builder is a directory containing +all the files necessary to run the virtual machine portably. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "parallels-iso", "guest_os_type": "ubuntu", @@ -40,219 +40,219 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the Parallels builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Parallels builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. 
Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. -* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". This can be omitted only if `parallels_tools_mode` is "disable". ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. 
The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. 
Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `guest_os_type` (string) - The guest OS type being installed. By default - this is "other", but you can get _dramatic_ performance improvements by - setting this to the proper value. To view all available values for this - run `prlctl create x --distribution list`. Setting the correct value hints to - Parallels Desktop how to optimize the virtual hardware to work best with - that operating system. +- `guest_os_type` (string) - The guest OS type being installed. By default this + is "other", but you can get *dramatic* performance improvements by setting + this to the proper value. To view all available values for this run + `prlctl create x --distribution list`. Setting the correct value hints to + Parallels Desktop how to optimize the virtual hardware to work best with that + operating system. -* `hard_drive_interface` (string) - The type of controller that the - hard drives are attached to, defaults to "sata". Valid options are - "sata", "ide", and "scsi". +- `hard_drive_interface` (string) - The type of controller that the hard drives + are attached to, defaults to "sata". 
Valid options are "sata", "ide", + and "scsi". -* `host_interfaces` (array of strings) - A list of which interfaces on the - host should be searched for a IP address. The first IP address found on - one of these will be used as `{{ .HTTPIP }}` in the `boot_command`. - Defaults to ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", - "en9", "ppp0", "ppp1", "ppp2"]. +- `host_interfaces` (array of strings) - A list of which interfaces on the host + should be searched for a IP address. The first IP address found on one of + these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to + \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9", + "ppp0", "ppp1", "ppp2"\]. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. 
If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `parallels_tools_guest_path` (string) - The path in the virtual machine to upload - Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". 
- This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which - should upload into the login directory of the user. +- `parallels_tools_guest_path` (string) - The path in the virtual machine to + upload Parallels Tools. This only takes effect if `parallels_tools_mode` + is "upload". This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`. + By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the + login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are made +- `parallels_tools_mode` (string) - The method by which Parallels Tools are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will be attached as a CD device to the virtual machine. If the mode is "upload" the Parallels Tools ISO will be uploaded to the path specified by `parallels_tools_guest_path`. The default value is "upload". -* `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the order - defined in the template. For each command, the command is defined itself as an - array of strings, where each string represents a single argument on the + this is an array of commands to execute. The commands are executed in the + order defined in the template. 
For each command, the command is defined itself + as an array of strings, where each string represents a single argument on the command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `prlctl` are below. + where the `Name` variable is replaced with the VM name. More details on how to + use `prlctl` are below. -* `prlctl_post` (array of array of strings) - Identical to `prlctl`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that + it is run after the virtual machine is shutdown, and before the virtual + machine is exported. -* `prlctl_version_file` (string) - The path within the virtual machine to upload +- `prlctl_version_file` (string) - The path within the virtual machine to upload a file that contains the `prlctl` version that was used to create the machine. This information can be useful for provisioning. By default this is - ".prlctl_version", which will generally upload it into the home directory. + ".prlctl\_version", which will generally upload it into the home directory. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. 
By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `vm_name` (string) - This is the name of the PVM directory for the new - virtual machine, without the file extension. By default this is - "packer-BUILDNAME", where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the PVM directory for the new virtual + machine, without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings are +all typed in sequence. It is an array only to improve readability within the +template. The boot command is "typed" character for character (using the Parallels Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html)) -simulating a human actually typing the keyboard. There are a set of special -keys available. If these are in your boot command, they will be replaced by -the proper key: +simulating a human actually typing the keyboard. 
There are a set of special keys +available. If these are in your boot command, they will be replaced by the +proper key: -* `` - Backspace +- `` - Backspace -* `` - Delete +- `` - Delete -* `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -* `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -* `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -* `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -* `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -* `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -* `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -* `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -* `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -* `` `` `` - Adds a 1, 5 or 10 second pause before sending any additional keys. This - is useful if you have to generally wait for the UI to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before sending + any additional keys. This is useful if you have to generally wait for the UI + to update before typing more. In addition to the special keys, each command to type is treated as a -[configuration template](/docs/templates/configuration-templates.html). -The available variables are: +[configuration template](/docs/templates/configuration-templates.html). The +available variables are: -* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server - that is started serving the directory specified by the `http_directory` - configuration parameter. If `http_directory` isn't specified, these will - be blank! 
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that + is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! -Example boot command. This is actually a working boot command used to start -an Ubuntu 12.04 installer: +Example boot command. This is actually a working boot command used to start an +Ubuntu 12.04 installer: -```text +``` {.text} [ "", "/install/vmlinuz noapic ", @@ -267,17 +267,18 @@ an Ubuntu 12.04 installer: ``` ## prlctl Commands + In order to perform extra customization of the virtual machine, a template can define extra calls to `prlctl` to perform. [prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf) is the command-line interface to Parallels Desktop. It can be used to configure the virtual machine, such as set RAM, CPUs, etc. -Extra `prlctl` commands are defined in the template in the `prlctl` section. -An example is shown below that sets the memory and number of CPUs within the +Extra `prlctl` commands are defined in the template in the `prlctl` section. An +example is shown below that sets the memory and number of CPUs within the virtual machine: -```javascript +``` {.javascript} { "prlctl": [ ["set", "{{.Name}}", "--memsize", "1024"], @@ -291,7 +292,7 @@ executed in the order defined. So in the above example, the memory will be set followed by the CPUs. Each command itself is an array of strings, where each string is an argument to -`prlctl`. Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). The only -available variable is `Name` which is replaced with the unique name of the VM, -which is required for many `prlctl` calls. +`prlctl`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). 
The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many `prlctl` calls. diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index 4083a57fd..f4f9f352c 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -1,30 +1,31 @@ --- -layout: "docs" -page_title: "Parallels Builder (from a PVM)" -description: |- - This Parallels builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format, starting from an existing PVM (exported virtual machine image). ---- +description: | + This Parallels builder is able to create Parallels Desktop for Mac virtual + machines and export them in the PVM format, starting from an existing PVM + (exported virtual machine image). +layout: docs +page_title: 'Parallels Builder (from a PVM)' +... # Parallels Builder (from a PVM) Type: `parallels-pvm` -This Parallels builder is able to create -[Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) -virtual machines and export them in the PVM format, starting from an -existing PVM (exported virtual machine image). +This Parallels builder is able to create [Parallels Desktop for +Mac](http://www.parallels.com/products/desktop/) virtual machines and export +them in the PVM format, starting from an existing PVM (exported virtual machine +image). -The builder builds a virtual machine by importing an existing PVM -file. It then boots this image, runs provisioners on this new VM, and -exports that VM to create the image. The imported machine is deleted prior -to finishing the build. +The builder builds a virtual machine by importing an existing PVM file. It then +boots this image, runs provisioners on this new VM, and exports that VM to +create the image. The imported machine is deleted prior to finishing the build. 
## Basic Example Here is a basic example. This example is functional if you have an PVM matching the settings here. -```javascript +``` {.javascript} { "type": "parallels-pvm", "parallels_tools_flavor": "lin", @@ -36,175 +37,180 @@ the settings here. } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the Parallels builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Parallels builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `source_path` (string) - The path to a PVM directory that acts as - the source of this build. +- `source_path` (string) - The path to a PVM directory that acts as the source + of this build. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. -* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". This can be omitted only if `parallels_tools_mode` is "disable". 
### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `floppy_files` (array of strings) - A list of files to put onto a floppy - disk that is attached when the VM is booted for the first time. This is - most useful for unattended Windows installs, which look for an - `Autounattend.xml` file on removable media. By default no floppy will - be attached. The files listed in this configuration will all be put - into the root directory of the floppy disk; sub-directories are not supported. +- `floppy_files` (array of strings) - A list of files to put onto a floppy disk + that is attached when the VM is booted for the first time. 
This is most useful + for unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default no floppy will be attached. The files listed in + this configuration will all be put into the root directory of the floppy disk; + sub-directories are not supported. -* `reassign_mac` (boolean) - If this is "false" the MAC address of the first - NIC will reused when imported else a new MAC address will be generated by - Parallels. Defaults to "false". +- `reassign_mac` (boolean) - If this is "false" the MAC address of the first NIC + will reused when imported else a new MAC address will be generated + by Parallels. Defaults to "false". -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `parallels_tools_guest_path` (string) - The path in the VM to upload +- `parallels_tools_guest_path` (string) - The path in the VM to upload Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". - This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which - should upload into the login directory of the user. 
+ This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`. + By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the + login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are made +- `parallels_tools_mode` (string) - The method by which Parallels Tools are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will be attached as a CD device to the virtual machine. If the mode is "upload" the Parallels Tools ISO will be uploaded to the path specified by `parallels_tools_guest_path`. The default value is "upload". -* `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the order - defined in the template. For each command, the command is defined itself as an - array of strings, where each string represents a single argument on the + this is an array of commands to execute. The commands are executed in the + order defined in the template. For each command, the command is defined itself + as an array of strings, where each string represents a single argument on the command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `prlctl` are below. + where the `Name` variable is replaced with the VM name. More details on how to + use `prlctl` are below. 
-* `prlctl_post` (array of array of strings) - Identical to `prlctl`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that + it is run after the virtual machine is shutdown, and before the virtual + machine is exported. -* `prlctl_version_file` (string) - The path within the virtual machine to upload +- `prlctl_version_file` (string) - The path within the virtual machine to upload a file that contains the `prlctl` version that was used to create the machine. This information can be useful for provisioning. By default this is - ".prlctl_version", which will generally upload it into the home directory. + ".prlctl\_version", which will generally upload it into the home directory. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `vm_name` (string) - This is the name of the virtual machine when it is - imported as well as the name of the PVM directory when the virtual machine is - exported. 
By default this is "packer-BUILDNAME", where "BUILDNAME" is - the name of the build. +- `vm_name` (string) - This is the name of the virtual machine when it is + imported as well as the name of the PVM directory when the virtual machine + is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the + name of the build. ## Parallels Tools + After the virtual machine is up and the operating system is installed, Packer uploads the Parallels Tools into the virtual machine. The path where they are uploaded is controllable by `parallels_tools_path`, and defaults to "prl-tools.iso". Without an absolute path, it is uploaded to the home directory -of the SSH user. Parallels Tools ISO's can be found in: -"/Applications/Parallels Desktop.app/Contents/Resources/Tools/" +of the SSH user. Parallels Tools ISO's can be found in: "/Applications/Parallels +Desktop.app/Contents/Resources/Tools/" ## Boot Command -The `boot_command` specifies the keys to type when the virtual machine is first booted. This command is typed after `boot_wait`. +The `boot_command` specifies the keys to type when the virtual machine is first +booted. This command is typed after `boot_wait`. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings are +all typed in sequence. It is an array only to improve readability within the +template. The boot command is "typed" character for character (using the Parallels Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html)) -simulating a human actually typing the keyboard. There are a set of special -keys available. If these are in your boot command, they will be replaced by -the proper key: +simulating a human actually typing the keyboard. There are a set of special keys +available. 
If these are in your boot command, they will be replaced by the
+proper key:

-* `<bs>` - Backspace
+- `<bs>` - Backspace

-* `<del>` - Delete
+- `<del>` - Delete

-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you have to generally wait for the UI
+  to update before typing more.

In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:

## prlctl Commands
+
In order to perform extra customization of the virtual machine, a template can
define extra calls to `prlctl` to perform.
[prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf)
is the command-line interface to Parallels Desktop. 
It can be used to configure the virtual machine, such as set RAM, CPUs, etc. -Extra `prlctl` commands are defined in the template in the `prlctl` section. -An example is shown below that sets the memory and number of CPUs within the +Extra `prlctl` commands are defined in the template in the `prlctl` section. An +example is shown below that sets the memory and number of CPUs within the virtual machine: -```javascript +``` {.javascript} { "prlctl": [ ["set", "{{.Name}}", "--memsize", "1024"], @@ -218,7 +224,7 @@ executed in the order defined. So in the above example, the memory will be set followed by the CPUs. Each command itself is an array of strings, where each string is an argument to -`prlctl`. Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). The only -available variable is `Name` which is replaced with the unique name of the VM, -which is required for many `prlctl` calls. +`prlctl`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many `prlctl` calls. diff --git a/website/source/docs/builders/parallels.html.markdown b/website/source/docs/builders/parallels.html.markdown index db5f62139..7d355eaef 100644 --- a/website/source/docs/builders/parallels.html.markdown +++ b/website/source/docs/builders/parallels.html.markdown @@ -1,34 +1,37 @@ --- -layout: "docs" -page_title: "Parallels Builder" -description: |- - The Parallels Packer builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format. ---- +description: | + The Parallels Packer builder is able to create Parallels Desktop for Mac virtual + machines and export them in the PVM format. +layout: docs +page_title: Parallels Builder +... 
# Parallels Builder -The Parallels Packer builder is able to create [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) virtual machines and export them in the PVM format. +The Parallels Packer builder is able to create [Parallels Desktop for +Mac](http://www.parallels.com/products/desktop/) virtual machines and export +them in the PVM format. -Packer actually comes with multiple builders able to create Parallels -machines, depending on the strategy you want to use to build the image. -Packer supports the following Parallels builders: +Packer actually comes with multiple builders able to create Parallels machines, +depending on the strategy you want to use to build the image. Packer supports +the following Parallels builders: -* [parallels-iso](/docs/builders/parallels-iso.html) - Starts from - an ISO file, creates a brand new Parallels VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. - -* [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder - imports an existing PVM file, runs provisioners on top of that VM, - and exports that machine to create an image. This is best if you have - an existing Parallels VM export you want to use as the source. As an - additional benefit, you can feed the artifact of this builder back into - itself to iterate on a machine. +- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO file, + creates a brand new Parallels VM, installs an OS, provisions software within + the OS, then exports that machine to create an image. This is best for people + who want to start from scratch. +- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an + existing PVM file, runs provisioners on top of that VM, and exports that + machine to create an image. This is best if you have an existing Parallels VM + export you want to use as the source. 
As an additional benefit, you can feed + the artifact of this builder back into itself to iterate on a machine. ## Requirements -In addition to [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) this requires the -[Parallels Virtualization SDK](http://www.parallels.com/downloads/desktop/). +In addition to [Parallels Desktop for +Mac](http://www.parallels.com/products/desktop/) this requires the [Parallels +Virtualization SDK](http://www.parallels.com/downloads/desktop/). -The SDK can be installed by downloading and following the instructions in the dmg. +The SDK can be installed by downloading and following the instructions in the +dmg. diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index ce39c53ec..57c53e4c0 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -1,30 +1,31 @@ --- -layout: "docs" -page_title: "QEMU Builder" -description: |- - The Qemu Packer builder is able to create KVM and Xen virtual machine images. Support for Xen is experimental at this time. ---- +description: | + The Qemu Packer builder is able to create KVM and Xen virtual machine images. + Support for Xen is experimental at this time. +layout: docs +page_title: QEMU Builder +... # QEMU Builder Type: `qemu` -The Qemu Packer builder is able to create [KVM](http://www.linux-kvm.org) -and [Xen](http://www.xenproject.org) virtual machine images. Support -for Xen is experimental at this time. +The Qemu Packer builder is able to create [KVM](http://www.linux-kvm.org) and +[Xen](http://www.xenproject.org) virtual machine images. Support for Xen is +experimental at this time. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, rebooting the machine with the -boot media as the virtual hard drive, provisioning software within -the OS, then shutting it down. 
The result of the Qemu builder is a directory -containing the image file necessary to run the virtual machine on KVM or Xen. +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, rebooting the machine with the boot media +as the virtual hard drive, provisioning software within the OS, then shutting it +down. The result of the Qemu builder is a directory containing the image file +necessary to run the virtual machine on KVM or Xen. ## Basic Example -Here is a basic example. This example is functional so long as you fixup -paths to files, URLS for ISOs and checksums. +Here is a basic example. This example is functional so long as you fixup paths +to files, URLS for ISOs and checksums. -```javascript +``` {.javascript} { "builders": [ @@ -62,153 +63,153 @@ paths to files, URLS for ISOs and checksums. } ``` -A working CentOS 6.x kickstart file can be found -[at this URL](https://gist.github.com/mitchellh/7328271/#file-centos6-ks-cfg), adapted from an unknown source. -Place this file in the http directory with the proper name. For the -example above, it should go into "httpdir" with a name of "centos6-ks.cfg". +A working CentOS 6.x kickstart file can be found [at this +URL](https://gist.github.com/mitchellh/7328271/#file-centos6-ks-cfg), adapted +from an unknown source. Place this file in the http directory with the proper +name. For the example above, it should go into "httpdir" with a name of +"centos6-ks.cfg". ## Configuration Reference -There are many configuration options available for the Qemu builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Qemu builder. They are +organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. 
In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "md5", "sha1", "sha256", or "sha512" currently. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "md5", "sha1", "sha256", or + "sha512" currently. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `accelerator` (string) - The accelerator type to use when running the VM. 
- This may have a value of either "none", "kvm", "tcg", or "xen" and you must have that - support in on the machine on which you run the builder. By default "kvm" +- `accelerator` (string) - The accelerator type to use when running the VM. This + may have a value of either "none", "kvm", "tcg", or "xen" and you must have + that support in on the machine on which you run the builder. By default "kvm" is used. -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_cache` (string) - The cache mode to use for disk. Allowed values - include any of "writethrough", "writeback", "none", "unsafe" or - "directsync". By default, this is set to "writeback". 
+- `disk_cache` (string) - The cache mode to use for disk. Allowed values include + any of "writethrough", "writeback", "none", "unsafe" or "directsync". By + default, this is set to "writeback". -* `disk_discard` (string) - The discard mode to use for disk. Allowed values - include any of "unmap" or "ignore". By default, this is set to "ignore". +- `disk_discard` (string) - The discard mode to use for disk. Allowed values + include any of "unmap" or "ignore". By default, this is set to "ignore". -* `disk_image` (boolean) - Packer defaults to building from an ISO file, - this parameter controls whether the ISO URL supplied is actually a bootable - QEMU image. When this value is set to true, the machine will clone the - source, resize it according to `disk_size` and boot the image. +- `disk_image` (boolean) - Packer defaults to building from an ISO file, this + parameter controls whether the ISO URL supplied is actually a bootable + QEMU image. When this value is set to true, the machine will clone the source, + resize it according to `disk_size` and boot the image. -* `disk_interface` (string) - The interface to use for the disk. Allowed - values include any of "ide," "scsi" or "virtio." Note also that any boot - commands or kickstart type scripts must have proper adjustments for - resulting device names. The Qemu builder uses "virtio" by default. +- `disk_interface` (string) - The interface to use for the disk. Allowed values + include any of "ide," "scsi" or "virtio." Note also that any boot commands or + kickstart type scripts must have proper adjustments for resulting + device names. The Qemu builder uses "virtio" by default. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). 
-* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "qcow2" or "raw", this specifies the output - format of the virtual machine image. This defaults to "qcow2". +- `format` (string) - Either "qcow2" or "raw", this specifies the output format + of the virtual machine image. This defaults to "qcow2". -* `headless` (boolean) - Packer defaults to building QEMU virtual machines by - launching a GUI that shows the console of the machine being built. - When this value is set to true, the machine will start without a console. +- `headless` (boolean) - Packer defaults to building QEMU virtual machines by + launching a GUI that shows the console of the machine being built. 
When this + value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. 
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `machine_type` (string) - The type of machine emulation to use. Run - your qemu binary with the flags `-machine help` to list available types - for your system. This defaults to "pc". +- `machine_type` (string) - The type of machine emulation to use. Run your qemu + binary with the flags `-machine help` to list available types for your system. + This defaults to "pc". -* `net_device` (string) - The driver to use for the network interface. Allowed - values "ne2k_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," +- `net_device` (string) - The driver to use for the network interface. Allowed + values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," "pcnet" or "virtio." The Qemu builder uses "virtio" by default. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `qemu_binary` (string) - The name of the Qemu binary to look for. This - defaults to "qemu-system-x86_64", but may need to be changed for some - platforms. 
For example "qemu-kvm", or "qemu-system-i386" may be a better +- `qemu_binary` (string) - The name of the Qemu binary to look for. This + defaults to "qemu-system-x86\_64", but may need to be changed for + some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better choice for some systems. -* `qemuargs` (array of array of strings) - Allows complete control over - the qemu command line (though not, at this time, qemu-img). Each array - of strings makes up a command line switch that overrides matching default - switch/value pairs. Any value specified as an empty string is ignored. - All values after the switch are concatenated with no separator. +- `qemuargs` (array of array of strings) - Allows complete control over the qemu + command line (though not, at this time, qemu-img). Each array of strings makes + up a command line switch that overrides matching default switch/value pairs. + Any value specified as an empty string is ignored. All values after the switch + are concatenated with no separator. -~> **Warning:** The qemu command line allows extreme flexibility, so beware of -conflicting arguments causing failures of your run. For instance, using +\~> **Warning:** The qemu command line allows extreme flexibility, so beware +of conflicting arguments causing failures of your run. For instance, using --no-acpi could break the ability to send power signal type commands (e.g., -shutdown -P now) to the virtual machine, thus preventing proper shutdown. To -see the defaults, look in the packer.log file and search for the -qemu-system-x86 command. The arguments are all printed for review. +shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see +the defaults, look in the packer.log file and search for the qemu-system-x86 +command. The arguments are all printed for review. The following shows a sample usage: -```javascript +``` {.javascript} // ... "qemuargs": [ [ "-m", "1024M" ], @@ -227,88 +228,87 @@ qemu-system-x86 command. 
The arguments are all printed for review. would produce the following (not including other defaults supplied by the builder and not otherwise conflicting with the qemuargs):
    -	qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
    +  qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
     
    +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. - -* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded - to the SSH port on the guest machine. Because Packer often runs in parallel, +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded to + the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port. -* `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for - the new virtual machine, without the file extension. By default this is +- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the + new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. 
-* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and - maximum port to use for the VNC port on the host machine which is forwarded - to the VNC port on the guest machine. Because Packer often runs in parallel, - Packer will choose a randomly available port in this range to use as the - host port. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to + use for the VNC port on the host machine which is forwarded to the VNC port on + the guest machine. Because Packer often runs in parallel, Packer will choose a + randomly available port in this range to use as the host port. ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings are +all typed in sequence. It is an array only to improve readability within the +template. -The boot command is "typed" character for character over a VNC connection -to the machine, simulating a human actually typing the keyboard. There are -a set of special keys available. If these are in your boot command, they -will be replaced by the proper key: +The boot command is "typed" character for character over a VNC connection to the +machine, simulating a human actually typing the keyboard. There are a set of +special keys available. 
If these are in your boot command, they will be replaced
+by the proper key:

-* `<bs>` - Backspace
+- `<bs>` - Backspace

-* `<del>` - Delete
+- `<del>` - Delete

-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you have to generally wait for the UI
+  to update before typing more.

In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:

-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-   that is started serving the directory specified by the `http_directory`
-   configuration parameter. If `http_directory` isn't specified, these will
-   be blank! 
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that + is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! -Example boot command. This is actually a working boot command used to start -an CentOS 6.4 installer: +Example boot command. This is actually a working boot command used to start an +CentOS 6.4 installer: -```javascript +``` {.javascript} "boot_command": [ "", diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 97ba056f8..bdccdf768 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -1,30 +1,31 @@ --- -layout: "docs" -page_title: "VirtualBox Builder (from an ISO)" -description: |- - The VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVF format, starting from an ISO image. ---- +description: | + The VirtualBox Packer builder is able to create VirtualBox virtual machines and + export them in the OVF format, starting from an ISO image. +layout: docs +page_title: 'VirtualBox Builder (from an ISO)' +... # VirtualBox Builder (from an ISO) Type: `virtualbox-iso` -The VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) -virtual machines and export them in the OVF format, starting from an -ISO image. +The VirtualBox Packer builder is able to create +[VirtualBox](https://www.virtualbox.org/) virtual machines and export them in +the OVF format, starting from an ISO image. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. The result of the VirtualBox builder is a directory -containing all the files necessary to run the virtual machine portably. 
+The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, then +shutting it down. The result of the VirtualBox builder is a directory containing +all the files necessary to run the virtual machine portably. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "virtualbox-iso", "guest_os_type": "Ubuntu_64", @@ -37,250 +38,249 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the VirtualBox builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VirtualBox builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. 
+[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. 
Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). -* `export_opts` (array of strings) - Additional options to pass to the `VBoxManage export`. - This can be useful for passing product information to include in the resulting - appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. 
This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "ovf" or "ova", this specifies the output - format of the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format of + the exported virtual machine. This defaults to "ovf". -* `guest_additions_mode` (string) - The method by which guest additions - are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". If the mode is "attach" the guest - additions ISO will be attached as a CD device to the virtual machine. - If the mode is "upload" the guest additions ISO will be uploaded to - the path specified by `guest_additions_path`. The default value is - "upload". If "disable" is used, guest additions won't be downloaded, - either. 
+- `guest_additions_mode` (string) - The method by which guest additions are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the guest additions ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the guest + additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -* `guest_additions_path` (string) - The path on the guest virtual machine - where the VirtualBox guest additions ISO will be uploaded. By default this - is "VBoxGuestAdditions.iso" which should upload into the login directory - of the user. This is a [configuration template](/docs/templates/configuration-templates.html) - where the `Version` variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine where + the VirtualBox guest additions ISO will be uploaded. By default this is + "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -* `guest_additions_sha256` (string) - The SHA256 checksum of the guest - additions ISO that will be uploaded to the guest VM. By default the - checksums will be downloaded from the VirtualBox website, so this only - needs to be set if you want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions + ISO that will be uploaded to the guest VM. By default the checksums will be + downloaded from the VirtualBox website, so this only needs to be set if you + want to be explicit about the checksum. -* `guest_additions_url` (string) - The URL to the guest additions ISO - to upload. This can also be a file URL if the ISO is at a local path. 
- By default, the VirtualBox builder will attempt to find the guest additions - ISO on the local file system. If it is not available locally, the builder - will download the proper guest additions ISO from the internet. +- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. + This can also be a file URL if the ISO is at a local path. By default, the + VirtualBox builder will attempt to find the guest additions ISO on the local + file system. If it is not available locally, the builder will download the + proper guest additions ISO from the internet. -* `guest_os_type` (string) - The guest OS type being installed. By default - this is "other", but you can get _dramatic_ performance improvements by - setting this to the proper value. To view all available values for this - run `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox - how to optimize the virtual hardware to work best with that operating - system. +- `guest_os_type` (string) - The guest OS type being installed. By default this + is "other", but you can get *dramatic* performance improvements by setting + this to the proper value. To view all available values for this run + `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how + to optimize the virtual hardware to work best with that operating system. -* `hard_drive_interface` (string) - The type of controller that the primary - hard drive is attached to, defaults to "ide". When set to "sata", the - drive is attached to an AHCI SATA controller. When set to "scsi", the drive - is attached to an LsiLogic SCSI controller. +- `hard_drive_interface` (string) - The type of controller that the primary hard + drive is attached to, defaults to "ide". When set to "sata", the drive is + attached to an AHCI SATA controller. When set to "scsi", the drive is attached + to an LsiLogic SCSI controller. 
-* `headless` (boolean) - Packer defaults to building VirtualBox - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. 
By + default the values are 8000 and 9000, respectively. -* `iso_interface` (string) - The type of controller that the ISO is attached - to, defaults to "ide". When set to "sata", the drive is attached to an - AHCI SATA controller. +- `iso_interface` (string) - The type of controller that the ISO is attached to, + defaults to "ide". When set to "sata", the drive is attached to an AHCI + SATA controller. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all - the provisioning is done. 
By default this is an empty string, which tells Packer to just - forcefully shut down the machine unless a shutdown command takes place inside script so this may - safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine unless a shutdown + command takes place inside script so this may safely be omitted. If one or + more scripts require a reboot it is suggested to leave this blank since + reboots may fail and specify the final shutdown command in your last script. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded - to the SSH port on the guest machine. Because Packer often runs in parallel, +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded to + the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port. -* `ssh_skip_nat_mapping` (boolean) - Defaults to false. 
When enabled, Packer does - not setup forwarded port mapping for SSH requests and uses `ssh_port` on the - host to communicate to the virtual machine +- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer + does not setup forwarded port mapping for SSH requests and uses `ssh_port` on + the host to communicate to the virtual machine -* `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to - execute in order to further customize the virtual machine being created. - The value of this is an array of commands to execute. The commands are executed - in the order defined in the template. For each command, the command is - defined itself as an array of strings, where each string represents a single - argument on the command-line to `VBoxManage` (but excluding `VBoxManage` - itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `VBoxManage` are below. +- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to + execute in order to further customize the virtual machine being created. The + value of this is an array of commands to execute. The commands are executed in + the order defined in the template. For each command, the command is defined + itself as an array of strings, where each string represents a single argument + on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each + arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use `VBoxManage` + are below. -* `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, +- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, except that it is run after the virtual machine is shutdown, and before the virtual machine is exported. 
-* `virtualbox_version_file` (string) - The path within the virtual machine - to upload a file that contains the VirtualBox version that was used to - create the machine. This information can be useful for provisioning. - By default this is ".vbox_version", which will generally be upload it into - the home directory. +- `virtualbox_version_file` (string) - The path within the virtual machine to + upload a file that contains the VirtualBox version that was used to create + the machine. This information can be useful for provisioning. By default this + is ".vbox\_version", which will generally be upload it into the + home directory. -* `vm_name` (string) - This is the name of the OVF file for the new virtual +- `vm_name` (string) - This is the name of the OVF file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings are +all typed in sequence. It is an array only to improve readability within the +template. -The boot command is "typed" character for character over a VNC connection -to the machine, simulating a human actually typing the keyboard. There are -a set of special keys available. 
If these are in your boot command, they -will be replaced by the proper key: +The boot command is "typed" character for character over a VNC connection to the +machine, simulating a human actually typing the keyboard. There are a set of +special keys available. If these are in your boot command, they will be replaced +by the proper key: -* `` - Backspace +- `` - Backspace -* `` - Delete +- `` - Delete -* `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -* `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -* `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -* `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -* `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -* `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -* `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -* `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -* `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -* `` `` `` - Adds a 1, 5 or 10 second pause before sending any additional keys. This - is useful if you have to generally wait for the UI to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before sending + any additional keys. This is useful if you have to generally wait for the UI + to update before typing more. In addition to the special keys, each command to type is treated as a -[configuration template](/docs/templates/configuration-templates.html). -The available variables are: +[configuration template](/docs/templates/configuration-templates.html). 
The +available variables are: -* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server - that is started serving the directory specified by the `http_directory` - configuration parameter. If `http_directory` isn't specified, these will - be blank! +- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that + is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! -Example boot command. This is actually a working boot command used to start -an Ubuntu 12.04 installer: +Example boot command. This is actually a working boot command used to start an +Ubuntu 12.04 installer: -```text +``` {.text} [ "", "/install/vmlinuz noapic ", @@ -296,31 +296,32 @@ an Ubuntu 12.04 installer: ## Guest Additions -Packer will automatically download the proper guest additions for the -version of VirtualBox that is running and upload those guest additions into -the virtual machine so that provisioners can easily install them. +Packer will automatically download the proper guest additions for the version of +VirtualBox that is running and upload those guest additions into the virtual +machine so that provisioners can easily install them. -Packer downloads the guest additions from the official VirtualBox website, -and verifies the file with the official checksums released by VirtualBox. +Packer downloads the guest additions from the official VirtualBox website, and +verifies the file with the official checksums released by VirtualBox. -After the virtual machine is up and the operating system is installed, -Packer uploads the guest additions into the virtual machine. The path where -they are uploaded is controllable by `guest_additions_path`, and defaults -to "VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the -home directory of the SSH user. 
+After the virtual machine is up and the operating system is installed, Packer +uploads the guest additions into the virtual machine. The path where they are +uploaded is controllable by `guest_additions_path`, and defaults to +"VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home +directory of the SSH user. ## VBoxManage Commands -In order to perform extra customization of the virtual machine, a template -can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) -is the command-line interface to VirtualBox where you can completely control -VirtualBox. It can be used to do things such as set RAM, CPUs, etc. +In order to perform extra customization of the virtual machine, a template can +define extra calls to `VBoxManage` to perform. +[VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line +interface to VirtualBox where you can completely control VirtualBox. It can be +used to do things such as set RAM, CPUs, etc. -Extra VBoxManage commands are defined in the template in the `vboxmanage` section. -An example is shown below that sets the memory and number of CPUs within the -virtual machine: +Extra VBoxManage commands are defined in the template in the `vboxmanage` +section. An example is shown below that sets the memory and number of CPUs +within the virtual machine: -```javascript +``` {.javascript} { "vboxmanage": [ ["modifyvm", "{{.Name}}", "--memory", "1024"], @@ -329,12 +330,12 @@ virtual machine: } ``` -The value of `vboxmanage` is an array of commands to execute. These commands -are executed in the order defined. So in the above example, the memory will be -set followed by the CPUs. +The value of `vboxmanage` is an array of commands to execute. These commands are +executed in the order defined. So in the above example, the memory will be set +followed by the CPUs. -Each command itself is an array of strings, where each string is an argument -to `VBoxManage`. 
Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). -The only available variable is `Name` which is replaced with the unique -name of the VM, which is required for many VBoxManage calls. +Each command itself is an array of strings, where each string is an argument to +`VBoxManage`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many VBoxManage calls. diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index 0a4516d02..dcf5dbd5c 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -1,39 +1,41 @@ --- -layout: "docs" -page_title: "VirtualBox Builder (from an OVF/OVA)" -description: |- - This VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVF format, starting from an existing OVF/OVA (exported virtual machine image). ---- +description: | + This VirtualBox Packer builder is able to create VirtualBox virtual machines and + export them in the OVF format, starting from an existing OVF/OVA (exported + virtual machine image). +layout: docs +page_title: 'VirtualBox Builder (from an OVF/OVA)' +... # VirtualBox Builder (from an OVF/OVA) Type: `virtualbox-ovf` -This VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) -virtual machines and export them in the OVF format, starting from an -existing OVF/OVA (exported virtual machine image). +This VirtualBox Packer builder is able to create +[VirtualBox](https://www.virtualbox.org/) virtual machines and export them in +the OVF format, starting from an existing OVF/OVA (exported virtual machine +image). 
-When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this: +When exporting from VirtualBox make sure to choose OVF Version 2, since Version +1 is not compatible and will generate errors like this: -``` -==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR -==> virtualbox-ovf: VBoxManage: error: Appliance read failed -==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 -==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance -==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp -``` + ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR + ==> virtualbox-ovf: VBoxManage: error: Appliance read failed + ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 + ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance + ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp -The builder builds a virtual machine by importing an existing OVF or OVA -file. It then boots this image, runs provisioners on this new VM, and -exports that VM to create the image. The imported machine is deleted prior -to finishing the build. +The builder builds a virtual machine by importing an existing OVF or OVA file. +It then boots this image, runs provisioners on this new VM, and exports that VM +to create the image. The imported machine is deleted prior to finishing the +build. ## Basic Example Here is a basic example. This example is functional if you have an OVF matching the settings here. 
-```javascript +``` {.javascript} { "type": "virtualbox-ovf", "source_path": "source.ovf", @@ -43,193 +45,194 @@ the settings here. } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the VirtualBox builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VirtualBox builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `source_path` (string) - The path to an OVF or OVA file that acts as - the source of this build. +- `source_path` (string) - The path to an OVF or OVA file that acts as the + source of this build. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. 
+- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `export_opts` (array of strings) - Additional options to pass to the `VBoxManage export`. - This can be useful for passing product information to include in the resulting - appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. 
Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "ovf" or "ova", this specifies the output - format of the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format of + the exported virtual machine. This defaults to "ovf". -* `guest_additions_mode` (string) - The method by which guest additions - are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". If the mode is "attach" the guest - additions ISO will be attached as a CD device to the virtual machine. - If the mode is "upload" the guest additions ISO will be uploaded to - the path specified by `guest_additions_path`. The default value is - "upload". If "disable" is used, guest additions won't be downloaded, - either. +- `guest_additions_mode` (string) - The method by which guest additions are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the guest additions ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the guest + additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". 
If "disable" is used, + guest additions won't be downloaded, either. -* `guest_additions_path` (string) - The path on the guest virtual machine - where the VirtualBox guest additions ISO will be uploaded. By default this - is "VBoxGuestAdditions.iso" which should upload into the login directory - of the user. This is a [configuration template](/docs/templates/configuration-templates.html) - where the `Version` variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine where + the VirtualBox guest additions ISO will be uploaded. By default this is + "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -* `guest_additions_sha256` (string) - The SHA256 checksum of the guest - additions ISO that will be uploaded to the guest VM. By default the - checksums will be downloaded from the VirtualBox website, so this only - needs to be set if you want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions + ISO that will be uploaded to the guest VM. By default the checksums will be + downloaded from the VirtualBox website, so this only needs to be set if you + want to be explicit about the checksum. -* `guest_additions_url` (string) - The URL to the guest additions ISO - to upload. This can also be a file URL if the ISO is at a local path. - By default the VirtualBox builder will go and download the proper - guest additions ISO from the internet. +- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. + This can also be a file URL if the ISO is at a local path. By default the + VirtualBox builder will go and download the proper guest additions ISO from + the internet. 
-* `headless` (boolean) - Packer defaults to building VirtualBox - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. 
By + default the values are 8000 and 9000, respectively. -* `import_flags` (array of strings) - Additional flags to pass to - `VBoxManage import`. This can be used to add additional command-line flags - such as `--eula-accept` to accept a EULA in the OVF. +- `import_flags` (array of strings) - Additional flags to pass to + `VBoxManage import`. This can be used to add additional command-line flags + such as `--eula-accept` to accept a EULA in the OVF. -* `import_opts` (string) - Additional options to pass to the `VBoxManage import`. - This can be useful for passing "keepallmacs" or "keepnatmacs" options for existing - ovf images. +- `import_opts` (string) - Additional options to pass to the + `VBoxManage import`. This can be useful for passing "keepallmacs" or + "keepnatmacs" options for existing ovf images. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all - the provisioning is done. By default this is an empty string, which tells Packer to just - forcefully shut down the machine unless a shutdown command takes place inside script so this may - safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last script. 
+- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine unless a shutdown + command takes place inside script so this may safely be omitted. If one or + more scripts require a reboot it is suggested to leave this blank since + reboots may fail and specify the final shutdown command in your last script. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded - to the SSH port on the guest machine. Because Packer often runs in parallel, +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded to + the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port. -* `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does - not setup forwarded port mapping for SSH requests and uses `ssh_port` on the - host to communicate to the virtual machine +- `ssh_skip_nat_mapping` (boolean) - Defaults to false. 
When enabled, Packer + does not setup forwarded port mapping for SSH requests and uses `ssh_port` on + the host to communicate to the virtual machine -* `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to - execute in order to further customize the virtual machine being created. - The value of this is an array of commands to execute. The commands are executed - in the order defined in the template. For each command, the command is - defined itself as an array of strings, where each string represents a single - argument on the command-line to `VBoxManage` (but excluding `VBoxManage` - itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `VBoxManage` are below. +- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to + execute in order to further customize the virtual machine being created. The + value of this is an array of commands to execute. The commands are executed in + the order defined in the template. For each command, the command is defined + itself as an array of strings, where each string represents a single argument + on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each + arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use `VBoxManage` + are below. -* `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, +- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, except that it is run after the virtual machine is shutdown, and before the virtual machine is exported. -* `virtualbox_version_file` (string) - The path within the virtual machine - to upload a file that contains the VirtualBox version that was used to - create the machine. This information can be useful for provisioning. 
- By default this is ".vbox_version", which will generally be upload it into - the home directory. +- `virtualbox_version_file` (string) - The path within the virtual machine to + upload a file that contains the VirtualBox version that was used to create + the machine. This information can be useful for provisioning. By default this + is ".vbox\_version", which will generally be uploaded into the + home directory. -* `vm_name` (string) - This is the name of the virtual machine when it is - imported as well as the name of the OVF file when the virtual machine is - exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is - the name of the build. +- `vm_name` (string) - This is the name of the virtual machine when it is + imported as well as the name of the OVF file when the virtual machine + is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the + name of the build. ## Guest Additions -Packer will automatically download the proper guest additions for the -version of VirtualBox that is running and upload those guest additions into -the virtual machine so that provisioners can easily install them. +Packer will automatically download the proper guest additions for the version of +VirtualBox that is running and upload those guest additions into the virtual +machine so that provisioners can easily install them. -Packer downloads the guest additions from the official VirtualBox website, -and verifies the file with the official checksums released by VirtualBox. +Packer downloads the guest additions from the official VirtualBox website, and +verifies the file with the official checksums released by VirtualBox. -After the virtual machine is up and the operating system is installed, -Packer uploads the guest additions into the virtual machine. The path where -they are uploaded is controllable by `guest_additions_path`, and defaults -to "VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the -home directory of the SSH user. 
+After the virtual machine is up and the operating system is installed, Packer +uploads the guest additions into the virtual machine. The path where they are +uploaded is controllable by `guest_additions_path`, and defaults to +"VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home +directory of the SSH user. ## VBoxManage Commands -In order to perform extra customization of the virtual machine, a template -can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) -is the command-line interface to VirtualBox where you can completely control -VirtualBox. It can be used to do things such as set RAM, CPUs, etc. +In order to perform extra customization of the virtual machine, a template can +define extra calls to `VBoxManage` to perform. +[VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line +interface to VirtualBox where you can completely control VirtualBox. It can be +used to do things such as set RAM, CPUs, etc. -Extra VBoxManage commands are defined in the template in the `vboxmanage` section. -An example is shown below that sets the memory and number of CPUs within the -virtual machine: +Extra VBoxManage commands are defined in the template in the `vboxmanage` +section. An example is shown below that sets the memory and number of CPUs +within the virtual machine: -```javascript +``` {.javascript} { "vboxmanage": [ ["modifyvm", "{{.Name}}", "--memory", "1024"], @@ -238,12 +241,12 @@ virtual machine: } ``` -The value of `vboxmanage` is an array of commands to execute. These commands -are executed in the order defined. So in the above example, the memory will be -set followed by the CPUs. +The value of `vboxmanage` is an array of commands to execute. These commands are +executed in the order defined. So in the above example, the memory will be set +followed by the CPUs. -Each command itself is an array of strings, where each string is an argument -to `VBoxManage`. 
Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). -The only available variable is `Name` which is replaced with the unique -name of the VM, which is required for many VBoxManage calls. +Each command itself is an array of strings, where each string is an argument to +`VBoxManage`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many VBoxManage calls. diff --git a/website/source/docs/builders/virtualbox.html.markdown b/website/source/docs/builders/virtualbox.html.markdown index 26e94b5b8..f96d37515 100644 --- a/website/source/docs/builders/virtualbox.html.markdown +++ b/website/source/docs/builders/virtualbox.html.markdown @@ -1,27 +1,28 @@ --- -layout: "docs" -page_title: "VirtualBox Builder" -description: |- - The VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVA or OVF format. ---- +description: | + The VirtualBox Packer builder is able to create VirtualBox virtual machines and + export them in the OVA or OVF format. +layout: docs +page_title: VirtualBox Builder +... # VirtualBox Builder -The VirtualBox Packer builder is able to create [VirtualBox](http://www.virtualbox.org) -virtual machines and export them in the OVA or OVF format. +The VirtualBox Packer builder is able to create +[VirtualBox](http://www.virtualbox.org) virtual machines and export them in the +OVA or OVF format. -Packer actually comes with multiple builders able to create VirtualBox -machines, depending on the strategy you want to use to build the image. -Packer supports the following VirtualBox builders: +Packer actually comes with multiple builders able to create VirtualBox machines, +depending on the strategy you want to use to build the image. 
Packer supports +the following VirtualBox builders: -* [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from - an ISO file, creates a brand new VirtualBox VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. +- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO + file, creates a brand new VirtualBox VM, installs an OS, provisions software + within the OS, then exports that machine to create an image. This is best for + people who want to start from scratch. -* [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder - imports an existing OVF/OVA file, runs provisioners on top of that VM, - and exports that machine to create an image. This is best if you have - an existing VirtualBox VM export you want to use as the source. As an - additional benefit, you can feed the artifact of this builder back into - itself to iterate on a machine. +- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports an + existing OVF/OVA file, runs provisioners on top of that VM, and exports that + machine to create an image. This is best if you have an existing VirtualBox VM + export you want to use as the source. As an additional benefit, you can feed + the artifact of this builder back into itself to iterate on a machine. diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 8ac3a9fd3..ad2ac5c33 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -1,37 +1,40 @@ --- -layout: "docs" -page_title: "VMware Builder from ISO" -description: |- - This VMware Packer builder is able to create VMware virtual machines from an ISO file as a source. 
It currently supports building virtual machines on hosts running VMware Fusion for OS X, VMware Workstation for Linux and Windows, and VMware Player on Linux. It can also build machines directly on VMware vSphere Hypervisor using SSH as opposed to the vSphere API. ---- +description: | + This VMware Packer builder is able to create VMware virtual machines from an ISO + file as a source. It currently supports building virtual machines on hosts + running VMware Fusion for OS X, VMware Workstation for Linux and Windows, and + VMware Player on Linux. It can also build machines directly on VMware vSphere + Hypervisor using SSH as opposed to the vSphere API. +layout: docs +page_title: VMware Builder from ISO +... # VMware Builder (from ISO) Type: `vmware-iso` -This VMware Packer builder is able to create VMware virtual machines from an -ISO file as a source. It currently -supports building virtual machines on hosts running -[VMware Fusion](http://www.vmware.com/products/fusion/overview.html) for OS X, -[VMware Workstation](http://www.vmware.com/products/workstation/overview.html) -for Linux and Windows, and -[VMware Player](http://www.vmware.com/products/player/) on Linux. It can -also build machines directly on -[VMware vSphere Hypervisor](http://www.vmware.com/products/vsphere-hypervisor/) -using SSH as opposed to the vSphere API. +This VMware Packer builder is able to create VMware virtual machines from an ISO +file as a source. It currently supports building virtual machines on hosts +running [VMware Fusion](http://www.vmware.com/products/fusion/overview.html) for +OS X, [VMware +Workstation](http://www.vmware.com/products/workstation/overview.html) for Linux +and Windows, and [VMware Player](http://www.vmware.com/products/player/) on +Linux. It can also build machines directly on [VMware vSphere +Hypervisor](http://www.vmware.com/products/vsphere-hypervisor/) using SSH as +opposed to the vSphere API. 
-The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. The result of the VMware builder is a directory -containing all the files necessary to run the virtual machine. +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, then +shutting it down. The result of the VMware builder is a directory containing all +the files necessary to run the virtual machine. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "vmware-iso", "iso_url": "http://old-releases.ubuntu.com/releases/precise/ubuntu-12.04.2-server-amd64.iso", @@ -44,261 +47,261 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio ## Configuration Reference -There are many configuration options available for the VMware builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VMware builder. They are +organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. 
+[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `disk_additional_size` (array of integers) - The size(s) of any additional +- `disk_additional_size` (array of integers) - The size(s) of any additional hard disks for the VM in megabytes. If this is not specified then the VM will only contain a primary hard disk. 
The builder uses expandable, not fixed-size virtual hard disks, so the actual file representing the disk will not use the full size unless it is full. -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size of the hard disk for the VM in megabytes. - The builder uses expandable, not fixed-size virtual hard disks, so the - actual file representing the disk will not use the full size unless it is full. - By default this is set to 40,000 (about 40 GB). +- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. 
The + builder uses expandable, not fixed-size virtual hard disks, so the actual file + representing the disk will not use the full size unless it is full. By default + this is set to 40,000 (about 40 GB). -* `disk_type_id` (string) - The type of VMware virtual disk to create. - The default is "1", which corresponds to a growable virtual disk split in - 2GB files. This option is for advanced usage, modify only if you - know what you're doing. For more information, please consult the - [Virtual Disk Manager User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) - for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. +- `disk_type_id` (string) - The type of VMware virtual disk to create. The + default is "1", which corresponds to a growable virtual disk split in + 2GB files. This option is for advanced usage, modify only if you know what + you're doing. For more information, please consult the [Virtual Disk Manager + User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop + VMware clients. For ESXi, refer to the proper ESXi documentation. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. 
This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this - is "/Applications/VMware Fusion.app" but this setting allows you to +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to customize this. -* `guest_os_type` (string) - The guest OS type being installed. This will be - set in the VMware VMX. By default this is "other". By specifying a more specific - OS type, VMware may perform some optimizations or virtual hardware changes - to better support the operating system running in the virtual machine. +- `guest_os_type` (string) - The guest OS type being installed. This will be set + in the VMware VMX. By default this is "other". By specifying a more specific + OS type, VMware may perform some optimizations or virtual hardware changes to + better support the operating system running in the virtual machine. -* `headless` (boolean) - Packer defaults to building VMware - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. For VMware machines, Packer will output VNC - connection information in case you need to connect to the console to - debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines by + launching a GUI that shows the console of the machine being built. 
When this + value is set to true, the machine will start without a console. For VMware + machines, Packer will output VNC connection information in case you need to + connect to the console to debug the build process. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. 
All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `remote_cache_datastore` (string) - The path to the datastore where - supporting files will be stored during the build on the remote machine. - By default this is the same as the `remote_datastore` option. This only - has an effect if `remote_type` is enabled. +- `remote_cache_datastore` (string) - The path to the datastore where supporting + files will be stored during the build on the remote machine. By default this + is the same as the `remote_datastore` option. This only has an effect if + `remote_type` is enabled. -* `remote_cache_directory` (string) - The path where the ISO and/or floppy - files will be stored during the build on the remote machine. The path is - relative to the `remote_cache_datastore` on the remote machine. By default - this is "packer_cache". 
This only has an effect if `remote_type` is enabled. +- `remote_cache_directory` (string) - The path where the ISO and/or floppy files + will be stored during the build on the remote machine. The path is relative to + the `remote_cache_datastore` on the remote machine. By default this + is "packer\_cache". This only has an effect if `remote_type` is enabled. -* `remote_datastore` (string) - The path to the datastore where the resulting - VM will be stored when it is built on the remote machine. By default this +- `remote_datastore` (string) - The path to the datastore where the resulting VM + will be stored when it is built on the remote machine. By default this is "datastore1". This only has an effect if `remote_type` is enabled. -* `remote_host` (string) - The host of the remote machine used for access. - This is only required if `remote_type` is enabled. +- `remote_host` (string) - The host of the remote machine used for access. This + is only required if `remote_type` is enabled. -* `remote_password` (string) - The SSH password for the user used to - access the remote machine. By default this is empty. This only has an - effect if `remote_type` is enabled. +- `remote_password` (string) - The SSH password for the user used to access the + remote machine. By default this is empty. This only has an effect if + `remote_type` is enabled. -* `remote_type` (string) - The type of remote machine that will be used to - build this VM rather than a local desktop product. The only value accepted - for this currently is "esx5". If this is not set, a desktop product will be - used. By default, this is not set. +- `remote_type` (string) - The type of remote machine that will be used to build + this VM rather than a local desktop product. The only value accepted for this + currently is "esx5". If this is not set, a desktop product will be used. By + default, this is not set. 
-* `remote_username` (string) - The username for the SSH user that will access +- `remote_username` (string) - The username for the SSH user that will access the remote machine. This is required if `remote_type` is enabled. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `skip_compaction` (boolean) - VMware-created disks are defragmented - and compacted at the end of the build process using `vmware-vdiskmanager`. - In certain rare cases, this might actually end up making the resulting disks +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks slightly larger. If you find this to be the case, you can disable compaction using this configuration value. -* `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to - upload into the VM. Valid values are "darwin", "linux", and "windows". - By default, this is empty, which means VMware tools won't be uploaded. 
+- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload + into the VM. Valid values are "darwin", "linux", and "windows". By default, + this is empty, which means VMware tools won't be uploaded. -* `tools_upload_path` (string) - The path in the VM to upload the VMware - tools. This only takes effect if `tools_upload_flavor` is non-empty. - This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `tools_upload_flavor`. By default the upload path is set to - `{{.Flavor}}.iso`. This setting is not used when `remote_type` is "esx5". +- `tools_upload_path` (string) - The path in the VM to upload the VMware tools. + This only takes effect if `tools_upload_flavor` is non-empty. This is a + [configuration template](/docs/templates/configuration-templates.html) that + has a single valid variable: `Flavor`, which will be the value of + `tools_upload_flavor`. By default the upload path is set to `{{.Flavor}}.iso`. + This setting is not used when `remote_type` is "esx5". -* `version` (string) - The [vmx hardware version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) for the new virtual machine. Only the default value has been tested, any other value is experimental. Default value is '9'. +- `version` (string) - The [vmx hardware + version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) + for the new virtual machine. Only the default value has been tested, any other + value is experimental. Default value is '9'. -* `vm_name` (string) - This is the name of the VMX file for the new virtual +- `vm_name` (string) - This is the name of the VMX file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. 
-* `vmdk_name` (string) - The filename of the virtual disk that'll be created, +- `vmdk_name` (string) - The filename of the virtual disk that'll be created, without the extension. This defaults to "packer". -* `vmx_data` (object of key/value strings) - Arbitrary key/values - to enter into the virtual machine VMX file. This is for advanced users - who want to set properties such as memory, CPU, etc. +- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into + the virtual machine VMX file. This is for advanced users who want to set + properties such as memory, CPU, etc. -* `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, +- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, except that it is run after the virtual machine is shutdown, and before the virtual machine is exported. -* `vmx_template_path` (string) - Path to a - [configuration template](/docs/templates/configuration-templates.html) that - defines the contents of the virtual machine VMX file for VMware. This is - for **advanced users only** as this can render the virtual machine - non-functional. See below for more information. For basic VMX modifications, - try `vmx_data` first. +- `vmx_template_path` (string) - Path to a [configuration + template](/docs/templates/configuration-templates.html) that defines the + contents of the virtual machine VMX file for VMware. This is for **advanced + users only** as this can render the virtual machine non-functional. See below + for more information. For basic VMX modifications, try `vmx_data` first. -* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to - use for VNC access to the virtual machine. The builder uses VNC to type - the initial `boot_command`. Because Packer generally runs in parallel, Packer - uses a randomly chosen port in this range that appears available. By default - this is 5900 to 6000. The minimum and maximum ports are inclusive. 
+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
+  use for VNC access to the virtual machine. The builder uses VNC to type the
+  initial `boot_command`. Because Packer generally runs in parallel, Packer uses
+  a randomly chosen port in this range that appears available. By default this
+  is 5900 to 6000. The minimum and maximum ports are inclusive.
 
 ## Boot Command
 
-The `boot_command` configuration is very important: it specifies the keys
-to type when the virtual machine is first booted in order to start the
-OS installer. This command is typed after `boot_wait`, which gives the
-virtual machine some time to actually load the ISO.
+The `boot_command` configuration is very important: it specifies the keys to
+type when the virtual machine is first booted in order to start the OS
+installer. This command is typed after `boot_wait`, which gives the virtual
+machine some time to actually load the ISO.
 
-As documented above, the `boot_command` is an array of strings. The
-strings are all typed in sequence. It is an array only to improve readability
-within the template.
+As documented above, the `boot_command` is an array of strings. The strings are
+all typed in sequence. It is an array only to improve readability within the
+template.
 
-The boot command is "typed" character for character over a VNC connection
-to the machine, simulating a human actually typing the keyboard. There are
-a set of special keys available. If these are in your boot command, they
-will be replaced by the proper key:
+The boot command is "typed" character for character over a VNC connection to the
+machine, simulating a human actually typing the keyboard. There are a set of
+special keys available. If these are in your boot command, they will be replaced
+by the proper key:
 
-* `<bs>` - Backspace
+- `<bs>` - Backspace
 
-* `<del>` - Delete
+- `<del>` - Delete
 
-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress. 
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
 
-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
 
-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
 
-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
 
-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
 
-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
 
-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
 
-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
 
-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
 
-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you have to generally wait for the UI
+  to update before typing more.
 
 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:
 
-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-  that is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will
-  be blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
+  is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!
 
-Example boot command. 
This is actually a working boot command used to start
-an Ubuntu 12.04 installer:
+Example boot command. This is actually a working boot command used to start an
+Ubuntu 12.04 installer:
 
-```text
+``` {.text}
 [
   "<esc><esc><enter><wait>",
   "/install/vmlinuz noapic ",
@@ -314,71 +317,73 @@ an Ubuntu 12.04 installer:
 
 ## VMX Template
 
-The heart of a VMware machine is the "vmx" file. This contains all the
-virtual hardware metadata necessary for the VM to function. Packer by default
-uses a [safe, flexible VMX file](https://github.com/mitchellh/packer/blob/20541a7eda085aa5cf35bfed5069592ca49d106e/builder/vmware/step_create_vmx.go#L84).
-But for advanced users, this template can be customized. This allows
-Packer to build virtual machines of effectively any guest operating system
-type.
+The heart of a VMware machine is the "vmx" file. This contains all the virtual
+hardware metadata necessary for the VM to function. Packer by default uses a
+[safe, flexible VMX
+file](https://github.com/mitchellh/packer/blob/20541a7eda085aa5cf35bfed5069592ca49d106e/builder/vmware/step_create_vmx.go#L84).
+But for advanced users, this template can be customized. This allows Packer to
+build virtual machines of effectively any guest operating system type.
 
-~> **This is an advanced feature.** Modifying the VMX template
-can easily cause your virtual machine to not boot properly. Please only
-modify the template if you know what you're doing.
+~> **This is an advanced feature.** Modifying the VMX template can easily
+cause your virtual machine to not boot properly. Please only modify the template
+if you know what you're doing.
 
-Within the template, a handful of variables are available so that your
-template can continue working with the rest of the Packer machinery. Using
-these variables isn't required, however.
+Within the template, a handful of variables are available so that your template
+can continue working with the rest of the Packer machinery. Using these
+variables isn't required, however. 
-* `Name` - The name of the virtual machine. -* `GuestOS` - The VMware-valid guest OS type. -* `DiskName` - The filename (without the suffix) of the main virtual disk. -* `ISOPath` - The path to the ISO to use for the OS installation. -* `Version` - The Hardware version VMWare will execute this vm under. Also known as the `virtualhw.version`. +- `Name` - The name of the virtual machine. +- `GuestOS` - The VMware-valid guest OS type. +- `DiskName` - The filename (without the suffix) of the main virtual disk. +- `ISOPath` - The path to the ISO to use for the OS installation. +- `Version` - The Hardware version VMWare will execute this vm under. Also known + as the `virtualhw.version`. ## Building on a Remote vSphere Hypervisor -In addition to using the desktop products of VMware locally to build -virtual machines, Packer can use a remote VMware Hypervisor to build -the virtual machine. +In addition to using the desktop products of VMware locally to build virtual +machines, Packer can use a remote VMware Hypervisor to build the virtual +machine. --> **Note:** Packer supports ESXi 5.1 and above. +-> **Note:** Packer supports ESXi 5.1 and above. -Before using a remote vSphere Hypervisor, you need to enable GuestIPHack by running the following command: +Before using a remote vSphere Hypervisor, you need to enable GuestIPHack by +running the following command: -```text +``` {.text} esxcli system settings advanced set -o /Net/GuestIPHack -i 1 ``` -When using a remote VMware Hypervisor, the builder still downloads the -ISO and various files locally, and uploads these to the remote machine. -Packer currently uses SSH to communicate to the ESXi machine rather than -the vSphere API. At some point, the vSphere API may be used. +When using a remote VMware Hypervisor, the builder still downloads the ISO and +various files locally, and uploads these to the remote machine. Packer currently +uses SSH to communicate to the ESXi machine rather than the vSphere API. 
At some +point, the vSphere API may be used. -Packer also requires VNC to issue boot commands during a build, -which may be disabled on some remote VMware Hypervisors. Please consult -the appropriate documentation on how to update VMware Hypervisor's firewall -to allow these connections. +Packer also requires VNC to issue boot commands during a build, which may be +disabled on some remote VMware Hypervisors. Please consult the appropriate +documentation on how to update VMware Hypervisor's firewall to allow these +connections. -To use a remote VMware vSphere Hypervisor to build your virtual machine, -fill in the required `remote_*` configurations: +To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in +the required `remote_*` configurations: -* `remote_type` - This must be set to "esx5". +- `remote_type` - This must be set to "esx5". -* `remote_host` - The host of the remote machine. +- `remote_host` - The host of the remote machine. -Additionally, there are some optional configurations that you'll likely -have to modify as well: +Additionally, there are some optional configurations that you'll likely have to +modify as well: -* `remote_datastore` - The path to the datastore where the VM will be - stored on the ESXi machine. +- `remote_datastore` - The path to the datastore where the VM will be stored on + the ESXi machine. -* `remote_cache_datastore` - The path to the datastore where - supporting files will be stored during the build on the remote machine. +- `remote_cache_datastore` - The path to the datastore where supporting files + will be stored during the build on the remote machine. -* `remote_cache_directory` - The path where the ISO and/or floppy - files will be stored during the build on the remote machine. The path is - relative to the `remote_cache_datastore` on the remote machine. +- `remote_cache_directory` - The path where the ISO and/or floppy files will be + stored during the build on the remote machine. 
The path is relative to the + `remote_cache_datastore` on the remote machine. -* `remote_username` - The SSH username used to access the remote machine. +- `remote_username` - The SSH username used to access the remote machine. -* `remote_password` - The SSH password for access to the remote machine. +- `remote_password` - The SSH password for access to the remote machine. diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown index e28ea3f89..bd1afb83c 100644 --- a/website/source/docs/builders/vmware-vmx.html.markdown +++ b/website/source/docs/builders/vmware-vmx.html.markdown @@ -1,34 +1,37 @@ --- -layout: "docs" -page_title: "VMware Builder from VMX" -description: |- - This VMware Packer builder is able to create VMware virtual machines from an existing VMware virtual machine (a VMX file). It currently supports building virtual machines on hosts running VMware Fusion Professional for OS X, VMware Workstation for Linux and Windows, and VMware Player on Linux. ---- +description: | + This VMware Packer builder is able to create VMware virtual machines from an + existing VMware virtual machine (a VMX file). It currently supports building + virtual machines on hosts running VMware Fusion Professional for OS X, VMware + Workstation for Linux and Windows, and VMware Player on Linux. +layout: docs +page_title: VMware Builder from VMX +... # VMware Builder (from VMX) Type: `vmware-vmx` This VMware Packer builder is able to create VMware virtual machines from an -existing VMware virtual machine (a VMX file). It currently -supports building virtual machines on hosts running -[VMware Fusion Professional](http://www.vmware.com/products/fusion-professional/) for OS X, +existing VMware virtual machine (a VMX file). 
It currently supports building +virtual machines on hosts running [VMware Fusion +Professional](http://www.vmware.com/products/fusion-professional/) for OS X, [VMware Workstation](http://www.vmware.com/products/workstation/overview.html) -for Linux and Windows, and -[VMware Player](http://www.vmware.com/products/player/) on Linux. +for Linux and Windows, and [VMware +Player](http://www.vmware.com/products/player/) on Linux. -The builder builds a virtual machine by cloning the VMX file using -the clone capabilities introduced in VMware Fusion Professional 6, Workstation 10, -and Player 6. After cloning the VM, it provisions software within the -new machine, shuts it down, and compacts the disks. The resulting folder -contains a new VMware virtual machine. +The builder builds a virtual machine by cloning the VMX file using the clone +capabilities introduced in VMware Fusion Professional 6, Workstation 10, and +Player 6. After cloning the VM, it provisions software within the new machine, +shuts it down, and compacts the disks. The resulting folder contains a new +VMware virtual machine. ## Basic Example -Here is an example. This example is fully functional as long as the source -path points to a real VMX file with the proper settings: +Here is an example. This example is fully functional as long as the source path +points to a real VMX file with the proper settings: -```javascript +``` {.javascript} { "type": "vmware-vmx", "source_path": "/path/to/a/vm.vmx", @@ -40,110 +43,109 @@ path points to a real VMX file with the proper settings: ## Configuration Reference -There are many configuration options available for the VMware builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VMware builder. They are +organized below into two categories: required and optional. 
Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `source_path` (string) - Path to the source VMX file to clone. +- `source_path` (string) - Path to the source VMX file to clone. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. 
If this isn't specified, the + default is 10 seconds. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this - is "/Applications/VMware Fusion.app" but this setting allows you to +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to customize this. -* `headless` (boolean) - Packer defaults to building VMware - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. 
For VMware machines, Packer will output VNC - connection information in case you need to connect to the console to - debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines by + launching a GUI that shows the console of the machine being built. When this + value is set to true, the machine will start without a console. For VMware + machines, Packer will output VNC connection information in case you need to + connect to the console to debug the build process. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. 
By + default the values are 8000 and 9000, respectively. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all - the provisioning is done. By default this is an empty string, which tells Packer to just - forcefully shut down the machine unless a shutdown command takes place inside script so this may - safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine unless a shutdown + command takes place inside script so this may safely be omitted. If one or + more scripts require a reboot it is suggested to leave this blank since + reboots may fail and specify the final shutdown command in your last script. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. 
+- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `skip_compaction` (boolean) - VMware-created disks are defragmented - and compacted at the end of the build process using `vmware-vdiskmanager`. - In certain rare cases, this might actually end up making the resulting disks +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks slightly larger. If you find this to be the case, you can disable compaction using this configuration value. -* `vm_name` (string) - This is the name of the VMX file for the new virtual +- `vm_name` (string) - This is the name of the VMX file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. -* `vmx_data` (object of key/value strings) - Arbitrary key/values - to enter into the virtual machine VMX file. This is for advanced users - who want to set properties such as memory, CPU, etc. +- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into + the virtual machine VMX file. This is for advanced users who want to set + properties such as memory, CPU, etc. -* `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, +- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, except that it is run after the virtual machine is shutdown, and before the virtual machine is exported. -* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to - use for VNC access to the virtual machine. The builder uses VNC to type - the initial `boot_command`. 
Because Packer generally runs in parallel, Packer - uses a randomly chosen port in this range that appears available. By default - this is 5900 to 6000. The minimum and maximum ports are inclusive. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to + use for VNC access to the virtual machine. The builder uses VNC to type the + initial `boot_command`. Because Packer generally runs in parallel, Packer uses + a randomly chosen port in this range that appears available. By default this + is 5900 to 6000. The minimum and maximum ports are inclusive. diff --git a/website/source/docs/builders/vmware.html.markdown b/website/source/docs/builders/vmware.html.markdown index 84d94a369..e77fe574a 100644 --- a/website/source/docs/builders/vmware.html.markdown +++ b/website/source/docs/builders/vmware.html.markdown @@ -1,27 +1,28 @@ --- -layout: "docs" -page_title: "VMware Builder" -description: |- - The VMware Packer builder is able to create VMware virtual machines for use with any VMware product. ---- +description: | + The VMware Packer builder is able to create VMware virtual machines for use with + any VMware product. +layout: docs +page_title: VMware Builder +... # VMware Builder -The VMware Packer builder is able to create VMware virtual machines for use -with any VMware product. +The VMware Packer builder is able to create VMware virtual machines for use with +any VMware product. -Packer actually comes with multiple builders able to create VMware -machines, depending on the strategy you want to use to build the image. -Packer supports the following VMware builders: +Packer actually comes with multiple builders able to create VMware machines, +depending on the strategy you want to use to build the image. 
Packer supports +the following VMware builders: -* [vmware-iso](/docs/builders/vmware-iso.html) - Starts from - an ISO file, creates a brand new VMware VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. +- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file, + creates a brand new VMware VM, installs an OS, provisions software within the + OS, then exports that machine to create an image. This is best for people who + want to start from scratch. -* [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder - imports an existing VMware machine (from a VMX file), runs provisioners - on top of that VM, and exports that machine to create an image. - This is best if you have an existing VMware VM you want to use as the - source. As an additional benefit, you can feed the artifact of this - builder back into Packer to iterate on a machine. +- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an + existing VMware machine (from a VMX file), runs provisioners on top of that + VM, and exports that machine to create an image. This is best if you have an + existing VMware VM you want to use as the source. As an additional benefit, + you can feed the artifact of this builder back into Packer to iterate on + a machine. diff --git a/website/source/docs/command-line/build.html.markdown b/website/source/docs/command-line/build.html.markdown index bada564a1..92afda570 100644 --- a/website/source/docs/command-line/build.html.markdown +++ b/website/source/docs/command-line/build.html.markdown @@ -1,37 +1,40 @@ --- -layout: "docs" -page_title: "Build - Command-Line" -description: |- - The `packer build` Packer command takes a template and runs all the builds within it in order to generate a set of artifacts. The various builds specified within a template are executed in parallel, unless otherwise specified. 
And the artifacts that are created will be outputted at the end of the build. ---- +description: | + The `packer build` Packer command takes a template and runs all the builds + within it in order to generate a set of artifacts. The various builds specified + within a template are executed in parallel, unless otherwise specified. And the + artifacts that are created will be outputted at the end of the build. +layout: docs +page_title: 'Build - Command-Line' +... # Command-Line: Build -The `packer build` Packer command takes a template and runs all the builds within -it in order to generate a set of artifacts. The various builds specified within -a template are executed in parallel, unless otherwise specified. And the +The `packer build` Packer command takes a template and runs all the builds +within it in order to generate a set of artifacts. The various builds specified +within a template are executed in parallel, unless otherwise specified. And the artifacts that are created will be outputted at the end of the build. ## Options -* `-color=false` - Disables colorized output. Enabled by default. +- `-color=false` - Disables colorized output. Enabled by default. -* `-debug` - Disables parallelization and enables debug mode. Debug mode flags +- `-debug` - Disables parallelization and enables debug mode. Debug mode flags the builders that they should output debugging information. The exact behavior of debug mode is left to the builder. In general, builders usually will stop - between each step, waiting for keyboard input before continuing. This will allow - the user to inspect state and so on. + between each step, waiting for keyboard input before continuing. This will + allow the user to inspect state and so on. -* `-except=foo,bar,baz` - Builds all the builds except those with the given +- `-except=foo,bar,baz` - Builds all the builds except those with the given comma-separated names. 
Build names by default are the names of their builders, unless a specific `name` attribute is specified within the configuration. -* `-force` - Forces a builder to run when artifacts from a previous build prevent - a build from running. The exact behavior of a forced build is left to the builder. - In general, a builder supporting the forced build will remove the artifacts from - the previous build. This will allow the user to repeat a build without having to - manually clean these artifacts beforehand. +- `-force` - Forces a builder to run when artifacts from a previous build + prevent a build from running. The exact behavior of a forced build is left to + the builder. In general, a builder supporting the forced build will remove the + artifacts from the previous build. This will allow the user to repeat a build + without having to manually clean these artifacts beforehand. -* `-only=foo,bar,baz` - Only build the builds with the given comma-separated - names. Build names by default are the names of their builders, unless a - specific `name` attribute is specified within the configuration. +- `-only=foo,bar,baz` - Only build the builds with the given + comma-separated names. Build names by default are the names of their builders, + unless a specific `name` attribute is specified within the configuration. diff --git a/website/source/docs/command-line/fix.html.markdown b/website/source/docs/command-line/fix.html.markdown index 958ebd0f8..eb383fec6 100644 --- a/website/source/docs/command-line/fix.html.markdown +++ b/website/source/docs/command-line/fix.html.markdown @@ -1,33 +1,34 @@ --- -layout: "docs" -page_title: "Fix - Command-Line" -description: |- - The `packer fix` Packer command takes a template and finds backwards incompatible parts of it and brings it up to date so it can be used with the latest version of Packer. After you update to a new Packer release, you should run the fix command to make sure your templates work with the new release. 
---- +description: | + The `packer fix` Packer command takes a template and finds backwards + incompatible parts of it and brings it up to date so it can be used with the + latest version of Packer. After you update to a new Packer release, you should + run the fix command to make sure your templates work with the new release. +layout: docs +page_title: 'Fix - Command-Line' +... # Command-Line: Fix -The `packer fix` Packer command takes a template and finds backwards incompatible -parts of it and brings it up to date so it can be used with the latest version -of Packer. After you update to a new Packer release, you should run the -fix command to make sure your templates work with the new release. +The `packer fix` Packer command takes a template and finds backwards +incompatible parts of it and brings it up to date so it can be used with the +latest version of Packer. After you update to a new Packer release, you should +run the fix command to make sure your templates work with the new release. -The fix command will output the changed template to standard out, so you -should redirect standard using standard OS-specific techniques if you want to -save it to a file. For example, on Linux systems, you may want to do this: +The fix command will output the changed template to standard out, so you should +redirect standard using standard OS-specific techniques if you want to save it +to a file. For example, on Linux systems, you may want to do this: -``` -$ packer fix old.json > new.json -``` + $ packer fix old.json > new.json -If fixing fails for any reason, the fix command will exit with a non-zero -exit status. Error messages appear on standard error, so if you're redirecting +If fixing fails for any reason, the fix command will exit with a non-zero exit +status. Error messages appear on standard error, so if you're redirecting output, you'll still see error messages. 
--> **Even when Packer fix doesn't do anything** to the template, -the template will be outputted to standard out. Things such as configuration -key ordering and indentation may be changed. The output format however, is -pretty-printed for human readability. +-> **Even when Packer fix doesn't do anything** to the template, the template +will be outputted to standard out. Things such as configuration key ordering and +indentation may be changed. The output format however, is pretty-printed for +human readability. -The full list of fixes that the fix command performs is visible in the -help output, which can be seen via `packer fix -h`. +The full list of fixes that the fix command performs is visible in the help +output, which can be seen via `packer fix -h`. diff --git a/website/source/docs/command-line/inspect.html.markdown b/website/source/docs/command-line/inspect.html.markdown index 09f979208..a1a86e3e5 100644 --- a/website/source/docs/command-line/inspect.html.markdown +++ b/website/source/docs/command-line/inspect.html.markdown @@ -1,33 +1,35 @@ --- -layout: "docs" -page_title: "Inspect - Command-Line" -description: |- - The `packer inspect` Packer command takes a template and outputs the various components a template defines. This can help you quickly learn about a template without having to dive into the JSON itself. The command will tell you things like what variables a template accepts, the builders it defines, the provisioners it defines and the order they'll run, and more. ---- +description: | + The `packer inspect` Packer command takes a template and outputs the various + components a template defines. This can help you quickly learn about a template + without having to dive into the JSON itself. The command will tell you things + like what variables a template accepts, the builders it defines, the + provisioners it defines and the order they'll run, and more. +layout: docs +page_title: 'Inspect - Command-Line' +... 
# Command-Line: Inspect -The `packer inspect` Packer command takes a template and outputs the various components -a template defines. This can help you quickly learn about a template without -having to dive into the JSON itself. -The command will tell you things like what variables a template accepts, -the builders it defines, the provisioners it defines and the order they'll -run, and more. +The `packer inspect` Packer command takes a template and outputs the various +components a template defines. This can help you quickly learn about a template +without having to dive into the JSON itself. The command will tell you things +like what variables a template accepts, the builders it defines, the +provisioners it defines and the order they'll run, and more. -This command is extra useful when used with -[machine-readable output](/docs/command-line/machine-readable.html) enabled. -The command outputs the components in a way that is parseable by machines. +This command is extra useful when used with [machine-readable +output](/docs/command-line/machine-readable.html) enabled. The command outputs +the components in a way that is parseable by machines. -The command doesn't validate the actual configuration of the various -components (that is what the `validate` command is for), but it will -validate the syntax of your template by necessity. +The command doesn't validate the actual configuration of the various components +(that is what the `validate` command is for), but it will validate the syntax of +your template by necessity. 
## Usage Example -Given a basic template, here is an example of what the output might -look like: +Given a basic template, here is an example of what the output might look like: -```text +``` {.text} $ packer inspect template.json Variables and their defaults: diff --git a/website/source/docs/command-line/introduction.html.markdown b/website/source/docs/command-line/introduction.html.markdown index 0a97e0056..ea9834397 100644 --- a/website/source/docs/command-line/introduction.html.markdown +++ b/website/source/docs/command-line/introduction.html.markdown @@ -1,24 +1,27 @@ --- -layout: "docs" -page_title: "Packer Command-Line" -description: |- - Packer is controlled using a command-line interface. All interaction with Packer is done via the `packer` tool. Like many other command-line tools, the `packer` tool takes a subcommand to execute, and that subcommand may have additional options as well. Subcommands are executed with `packer SUBCOMMAND`, where "SUBCOMMAND" is obviously the actual command you wish to execute. ---- +description: | + Packer is controlled using a command-line interface. All interaction with Packer + is done via the `packer` tool. Like many other command-line tools, the `packer` + tool takes a subcommand to execute, and that subcommand may have additional + options as well. Subcommands are executed with `packer SUBCOMMAND`, where + "SUBCOMMAND" is obviously the actual command you wish to execute. +layout: docs +page_title: 'Packer Command-Line' +... # Packer Command-Line -Packer is controlled using a command-line interface. All interaction with -Packer is done via the `packer` tool. Like many other command-line tools, -the `packer` tool takes a subcommand to execute, and that subcommand may -have additional options as well. Subcommands are executed with -`packer SUBCOMMAND`, where "SUBCOMMAND" is obviously the actual command you wish -to execute. +Packer is controlled using a command-line interface. 
All interaction with Packer +is done via the `packer` tool. Like many other command-line tools, the `packer` +tool takes a subcommand to execute, and that subcommand may have additional +options as well. Subcommands are executed with `packer SUBCOMMAND`, where +"SUBCOMMAND" is obviously the actual command you wish to execute. If you run `packer` by itself, help will be displayed showing all available subcommands and a brief synopsis of what they do. In addition to this, you can -run any `packer` command with the `-h` flag to output more detailed help for -a specific subcommand. +run any `packer` command with the `-h` flag to output more detailed help for a +specific subcommand. -In addition to the documentation available on the command-line, each command -is documented on this website. You can find the documentation for a specific +In addition to the documentation available on the command-line, each command is +documented on this website. You can find the documentation for a specific subcommand using the navigation to the left. diff --git a/website/source/docs/command-line/machine-readable.html.markdown b/website/source/docs/command-line/machine-readable.html.markdown index 5fed33310..550a14f35 100644 --- a/website/source/docs/command-line/machine-readable.html.markdown +++ b/website/source/docs/command-line/machine-readable.html.markdown @@ -1,30 +1,33 @@ --- -layout: "docs" -page_title: "Machine-Readable Output - Command-Line" -description: |- - By default, the output of Packer is very human-readable. It uses nice formatting, spacing, and colors in order to make Packer a pleasure to use. However, Packer was built with automation in mind. To that end, Packer supports a fully machine-readable output setting, allowing you to use Packer in automated environments. ---- +description: | + By default, the output of Packer is very human-readable. It uses nice + formatting, spacing, and colors in order to make Packer a pleasure to use. 
+ However, Packer was built with automation in mind. To that end, Packer supports + a fully machine-readable output setting, allowing you to use Packer in automated + environments. +layout: docs +page_title: 'Machine-Readable Output - Command-Line' +... # Machine-Readable Output By default, the output of Packer is very human-readable. It uses nice formatting, spacing, and colors in order to make Packer a pleasure to use. -However, Packer was built with automation in mind. To that end, Packer -supports a fully machine-readable output setting, allowing you to use -Packer in automated environments. +However, Packer was built with automation in mind. To that end, Packer supports +a fully machine-readable output setting, allowing you to use Packer in automated +environments. -The machine-readable output format is easy to use and read and was made -with Unix tools in mind, so it is awk/sed/grep/etc. friendly. +The machine-readable output format is easy to use and read and was made with +Unix tools in mind, so it is awk/sed/grep/etc. friendly. ## Enabling The machine-readable output format can be enabled by passing the -`-machine-readable` flag to any Packer command. This immediately enables -all output to become machine-readable on stdout. Logging, if enabled, -continues to appear on stderr. An example of the output is shown -below: +`-machine-readable` flag to any Packer command. This immediately enables all +output to become machine-readable on stdout. Logging, if enabled, continues to +appear on stderr. An example of the output is shown below: -```text +``` {.text} $ packer -machine-readable version 1376289459,,version,0.2.4 1376289459,,version-prerelease, @@ -32,54 +35,50 @@ $ packer -machine-readable version 1376289459,,ui,say,Packer v0.2.4.dev (eed6ece+CHANGES) ``` -The format will be covered in more detail later. But as you can see, -the output immediately becomes machine-friendly. Try some other commands -with the `-machine-readable` flag to see! 
+The format will be covered in more detail later. But as you can see, the output +immediately becomes machine-friendly. Try some other commands with the +`-machine-readable` flag to see! ## Format -The machine readable format is a line-oriented, comma-delimited text -format. This makes it extremely easy to parse using standard Unix tools such -as awk or grep in addition to full programming languages like Ruby or -Python. +The machine readable format is a line-oriented, comma-delimited text format. +This makes it extremely easy to parse using standard Unix tools such as awk or +grep in addition to full programming languages like Ruby or Python. The format is: -```text +``` {.text} timestamp,target,type,data... ``` Each component is explained below: -* **timestamp** is a Unix timestamp in UTC of when the message was - printed. +- **timestamp** is a Unix timestamp in UTC of when the message was printed. -* **target** is the target of the following output. This is empty if - the message is related to Packer globally. Otherwise, this is generally - a build name so you can relate output to a specific build while parallel - builds are running. +- **target** is the target of the following output. This is empty if the message + is related to Packer globally. Otherwise, this is generally a build name so + you can relate output to a specific build while parallel builds are running. -* **type** is the type of machine-readable message being outputted. There - are a set of standard types which are covered later, but each component - of Packer (builders, provisioners, etc.) may output their own custom types - as well, allowing the machine-readable output to be infinitely flexible. +- **type** is the type of machine-readable message being outputted. There are a + set of standard types which are covered later, but each component of Packer + (builders, provisioners, etc.) may output their own custom types as well, + allowing the machine-readable output to be infinitely flexible. 
-* **data** is zero or more comma-seperated values associated with the prior
-  type. The exact amount and meaning of this data is type-dependent, so you
-  must read the documentation associated with the type to understand fully.
+- **data** is zero or more comma-separated values associated with the
+  prior type. The exact amount and meaning of this data is type-dependent, so
+  you must read the documentation associated with the type to understand fully.
 
 Within the format, if data contains a comma, it is replaced with
-`%!(PACKER_COMMA)`. This was preferred over an escape character such as
-`\'` because it is more friendly to tools like awk.
+`%!(PACKER_COMMA)`. This was preferred over an escape character such as `\'`
+because it is more friendly to tools like awk.
 
-Newlines within the format are replaced with their respective standard
-escape sequence. Newlines become a literal `\n` within the output. Carriage
-returns become a literal `\r`.
+Newlines within the format are replaced with their respective standard escape
+sequence. Newlines become a literal `\n` within the output. Carriage returns
+become a literal `\r`.
 
 ## Message Types
 
-The set of machine-readable message types can be found in the
-[machine-readable format](/docs/machine-readable/index.html)
-complete documentation section. This section contains documentation
-on all the message types exposed by Packer core as well as all the
-components that ship with Packer by default.
+The set of machine-readable message types can be found in the [machine-readable
+format](/docs/machine-readable/index.html) complete documentation section. This
+section contains documentation on all the message types exposed by Packer core
+as well as all the components that ship with Packer by default.
diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 5833b917f..0cc9699f5 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -1,9 +1,10 @@ --- -layout: "docs" -page_title: "Push - Command-Line" -description: |- - The `packer push` Packer command takes a template and pushes it to a build service that will automatically build this Packer template. ---- +description: | + The `packer push` Packer command takes a template and pushes it to a build + service that will automatically build this Packer template. +layout: docs +page_title: 'Push - Command-Line' +... # Command-Line: Push @@ -16,36 +17,36 @@ External build services such as HashiCorp's Atlas make it easy to iterate on Packer templates, especially when the builder you are running may not be easily accessable (such as developing `qemu` builders on Mac or Windows). -!> The Packer build service will receive the raw copy of your Packer template +!> The Packer build service will receive the raw copy of your Packer template when you push. **If you have sensitive data in your Packer template, you should move that data into Packer variables or environment variables!** -For the `push` command to work, the [push configuration](/docs/templates/push.html) -must be completed within the template. +For the `push` command to work, the [push +configuration](/docs/templates/push.html) must be completed within the template. ## Options -* `-message` - A message to identify the purpose or changes in this Packer +- `-message` - A message to identify the purpose or changes in this Packer template much like a VCS commit message. This message will be passed to the Packer build service. This option is also available as a short option `-m`. 
-* `-token` - An access token for authenticating the push to the Packer build +- `-token` - An access token for authenticating the push to the Packer build service such as Atlas. This can also be specified within the push configuration in the template. -* `-name` - The name of the build in the service. This typically - looks like `hashicorp/precise64`. +- `-name` - The name of the build in the service. This typically looks like + `hashicorp/precise64`. ## Examples Push a Packer template: -```shell +``` {.shell} $ packer push -m "Updating the apache version" template.json ``` Push a Packer template with a custom token: -```shell +``` {.shell} $ packer push -token ABCD1234 template.json ``` diff --git a/website/source/docs/command-line/validate.html.markdown b/website/source/docs/command-line/validate.html.markdown index 530f00295..e17f23dc4 100644 --- a/website/source/docs/command-line/validate.html.markdown +++ b/website/source/docs/command-line/validate.html.markdown @@ -1,20 +1,24 @@ --- -layout: "docs" -page_title: "Validate - Command-Line" -description: |- - The `packer validate` Packer command is used to validate the syntax and configuration of a template. The command will return a zero exit status on success, and a non-zero exit status on failure. Additionally, if a template doesn't validate, any error messages will be outputted. ---- +description: | + The `packer validate` Packer command is used to validate the syntax and + configuration of a template. The command will return a zero exit status on + success, and a non-zero exit status on failure. Additionally, if a template + doesn't validate, any error messages will be outputted. +layout: docs +page_title: 'Validate - Command-Line' +... # Command-Line: Validate -The `packer validate` Packer command is used to validate the syntax and configuration -of a [template](/docs/templates/introduction.html). The command will return -a zero exit status on success, and a non-zero exit status on failure. 
Additionally, -if a template doesn't validate, any error messages will be outputted. +The `packer validate` Packer command is used to validate the syntax and +configuration of a [template](/docs/templates/introduction.html). The command +will return a zero exit status on success, and a non-zero exit status on +failure. Additionally, if a template doesn't validate, any error messages will +be outputted. Example usage: -```text +``` {.text} $ packer validate my-template.json Template validation failed. Errors are shown below. @@ -25,5 +29,5 @@ Errors validating build 'vmware'. 1 error(s) occurred: ## Options -* `-syntax-only` - Only the syntax of the template is checked. The configuration +- `-syntax-only` - Only the syntax of the template is checked. The configuration is not validated. diff --git a/website/source/docs/extend/builder.html.markdown b/website/source/docs/extend/builder.html.markdown index a841d5c3d..41a83bcef 100644 --- a/website/source/docs/extend/builder.html.markdown +++ b/website/source/docs/extend/builder.html.markdown @@ -1,167 +1,170 @@ --- -layout: "docs" -page_title: "Custom Builder - Extend Packer" -description: |- - Packer Builders are the components of Packer responsible for creating a machine, bringing it to a point where it can be provisioned, and then turning that provisioned machine into some sort of machine image. Several builders are officially distributed with Packer itself, such as the AMI builder, the VMware builder, etc. However, it is possible to write custom builders using the Packer plugin interface, and this page documents how to do that. ---- +description: | + Packer Builders are the components of Packer responsible for creating a machine, + bringing it to a point where it can be provisioned, and then turning that + provisioned machine into some sort of machine image. Several builders are + officially distributed with Packer itself, such as the AMI builder, the VMware + builder, etc. 
However, it is possible to write custom builders using the Packer
+  plugin interface, and this page documents how to do that.
+layout: docs
+page_title: 'Custom Builder - Extend Packer'
+...
 
 # Custom Builder Development
 
 Packer Builders are the components of Packer responsible for creating a machine,
-bringing it to a point where it can be provisioned, and then turning
-that provisioned machine into some sort of machine image. Several builders
-are officially distributed with Packer itself, such as the AMI builder, the
-VMware builder, etc. However, it is possible to write custom builders using
-the Packer plugin interface, and this page documents how to do that.
+bringing it to a point where it can be provisioned, and then turning that
+provisioned machine into some sort of machine image. Several builders are
+officially distributed with Packer itself, such as the AMI builder, the VMware
+builder, etc. However, it is possible to write custom builders using the Packer
+plugin interface, and this page documents how to do that.
 
-Prior to reading this page, it is assumed you have read the page on
-[plugin development basics](/docs/extend/developing-plugins.html).
+Prior to reading this page, it is assumed you have read the page on [plugin
+development basics](/docs/extend/developing-plugins.html).
 
-~> **Warning!** This is an advanced topic. If you're new to Packer, we
+~> **Warning!** This is an advanced topic. If you're new to Packer, we
 recommend getting a bit more comfortable before you dive into writing plugins.
 
 ## The Interface
 
 The interface that must be implemented for a builder is the `packer.Builder`
-interface. It is reproduced below for easy reference. The actual interface
-in the source code contains some basic documentation as well explaining
-what each method should do.
+interface. It is reproduced below for easy reference. The actual interface in
+the source code contains some basic documentation as well explaining what each
+method should do.
-```go +``` {.go} type Builder interface { - Prepare(...interface{}) error - Run(ui Ui, hook Hook, cache Cache) (Artifact, error) - Cancel() + Prepare(...interface{}) error + Run(ui Ui, hook Hook, cache Cache) (Artifact, error) + Cancel() } ``` ### The "Prepare" Method -The `Prepare` method for each builder is called prior to any runs with -the configuration that was given in the template. This is passed in as -an array of `interface{}` types, but is generally `map[string]interface{}`. The prepare +The `Prepare` method for each builder is called prior to any runs with the +configuration that was given in the template. This is passed in as an array of +`interface{}` types, but is generally `map[string]interface{}`. The prepare method is responsible for translating this configuration into an internal structure, validating it, and returning any errors. For multiple parameters, they should be merged together into the final -configuration, with later parameters overwriting any previous configuration. -The exact semantics of the merge are left to the builder author. +configuration, with later parameters overwriting any previous configuration. The +exact semantics of the merge are left to the builder author. For decoding the `interface{}` into a meaningful structure, the -[mapstructure](https://github.com/mitchellh/mapstructure) library is recommended. -Mapstructure will take an `interface{}` and decode it into an arbitrarily -complex struct. If there are any errors, it generates very human friendly -errors that can be returned directly from the prepare method. +[mapstructure](https://github.com/mitchellh/mapstructure) library is +recommended. Mapstructure will take an `interface{}` and decode it into an +arbitrarily complex struct. If there are any errors, it generates very human +friendly errors that can be returned directly from the prepare method. -While it is not actively enforced, **no side effects** should occur from -running the `Prepare` method. 
Specifically, don't create files, don't launch -virtual machines, etc. Prepare's purpose is solely to configure the builder -and validate the configuration. +While it is not actively enforced, **no side effects** should occur from running +the `Prepare` method. Specifically, don't create files, don't launch virtual +machines, etc. Prepare's purpose is solely to configure the builder and validate +the configuration. -In addition to normal configuration, Packer will inject a `map[string]interface{}` -with a key of `packer.DebugConfigKey` set to boolean `true` if debug mode -is enabled for the build. If this is set to true, then the builder -should enable a debug mode which assists builder developers and advanced -users to introspect what is going on during a build. During debug -builds, parallelism is strictly disabled, so it is safe to request input -from stdin and so on. +In addition to normal configuration, Packer will inject a +`map[string]interface{}` with a key of `packer.DebugConfigKey` set to boolean +`true` if debug mode is enabled for the build. If this is set to true, then the +builder should enable a debug mode which assists builder developers and advanced +users to introspect what is going on during a build. During debug builds, +parallelism is strictly disabled, so it is safe to request input from stdin and +so on. ### The "Run" Method -`Run` is where all the interesting stuff happens. Run is executed, often -in parallel for multiple builders, to actually build the machine, provision -it, and create the resulting machine image, which is returned as an -implementation of the `packer.Artifact` interface. +`Run` is where all the interesting stuff happens. Run is executed, often in +parallel for multiple builders, to actually build the machine, provision it, and +create the resulting machine image, which is returned as an implementation of +the `packer.Artifact` interface. The `Run` method takes three parameters. These are all very useful. 
The -`packer.Ui` object is used to send output to the console. `packer.Hook` is -used to execute hooks, which are covered in more detail in the hook section -below. And `packer.Cache` is used to store files between multiple Packer -runs, and is covered in more detail in the cache section below. +`packer.Ui` object is used to send output to the console. `packer.Hook` is used +to execute hooks, which are covered in more detail in the hook section below. +And `packer.Cache` is used to store files between multiple Packer runs, and is +covered in more detail in the cache section below. Because builder runs are typically a complex set of many steps, the -[multistep](https://github.com/mitchellh/multistep) library is recommended -to bring order to the complexity. Multistep is a library which allows you to -separate your logic into multiple distinct "steps" and string them together. -It fully supports cancellation mid-step and so on. Please check it out, it is -how the built-in builders are all implemented. +[multistep](https://github.com/mitchellh/multistep) library is recommended to +bring order to the complexity. Multistep is a library which allows you to +separate your logic into multiple distinct "steps" and string them together. It +fully supports cancellation mid-step and so on. Please check it out, it is how +the built-in builders are all implemented. -Finally, as a result of `Run`, an implementation of `packer.Artifact` should -be returned. More details on creating a `packer.Artifact` are covered in the -artifact section below. If something goes wrong during the build, an error -can be returned, as well. Note that it is perfectly fine to produce no artifact -and no error, although this is rare. +Finally, as a result of `Run`, an implementation of `packer.Artifact` should be +returned. More details on creating a `packer.Artifact` are covered in the +artifact section below. If something goes wrong during the build, an error can +be returned, as well. 
Note that it is perfectly fine to produce no artifact and +no error, although this is rare. ### The "Cancel" Method -The `Run` method is often run in parallel. The `Cancel` method can be -called at any time and requests cancellation of any builder run in progress. -This method should block until the run actually stops. +The `Run` method is often run in parallel. The `Cancel` method can be called at +any time and requests cancellation of any builder run in progress. This method +should block until the run actually stops. -Cancels are most commonly triggered by external interrupts, such as the -user pressing `Ctrl-C`. Packer will only exit once all the builders clean up, -so it is important that you architect your builder in a way that it is quick -to respond to these cancellations and clean up after itself. +Cancels are most commonly triggered by external interrupts, such as the user +pressing `Ctrl-C`. Packer will only exit once all the builders clean up, so it +is important that you architect your builder in a way that it is quick to +respond to these cancellations and clean up after itself. ## Creating an Artifact The `Run` method is expected to return an implementation of the -`packer.Artifact` interface. Each builder must create their own -implementation. The interface is very simple and the documentation on the -interface is quite clear. +`packer.Artifact` interface. Each builder must create their own implementation. +The interface is very simple and the documentation on the interface is quite +clear. -The only part of an artifact that may be confusing is the `BuilderId` -method. This method must return an absolutely unique ID for the builder. -In general, I follow the practice of making the ID contain my GitHub username -and then the platform it is building for. For example, the builder ID of -the VMware builder is "mitchellh.vmware" or something similar. +The only part of an artifact that may be confusing is the `BuilderId` method. 
+This method must return an absolutely unique ID for the builder. In general, I +follow the practice of making the ID contain my GitHub username and then the +platform it is building for. For example, the builder ID of the VMware builder +is "mitchellh.vmware" or something similar. -Post-processors use the builder ID value in order to make some assumptions -about the artifact results, so it is important it never changes. +Post-processors use the builder ID value in order to make some assumptions about +the artifact results, so it is important it never changes. -Other than the builder ID, the rest should be self-explanatory by reading -the [packer.Artifact interface documentation](#). +Other than the builder ID, the rest should be self-explanatory by reading the +[packer.Artifact interface documentation](#). ## Provisioning Packer has built-in support for provisioning, but the moment when provisioning -runs must be invoked by the builder itself, since only the builder knows -when the machine is running and ready for communication. +runs must be invoked by the builder itself, since only the builder knows when +the machine is running and ready for communication. When the machine is ready to be provisioned, run the `packer.HookProvision` hook, making sure the communicator is not nil, since this is required for provisioners. An example of calling the hook is shown below: -```go +``` {.go} hook.Run(packer.HookProvision, ui, comm, nil) ``` -At this point, Packer will run the provisioners and no additional work -is necessary. +At this point, Packer will run the provisioners and no additional work is +necessary. --> **Note:** Hooks are still undergoing thought around their -general design and will likely change in a future version. They aren't -fully "baked" yet, so they aren't documented here other than to tell you -how to hook in provisioners. +-> **Note:** Hooks are still undergoing thought around their general design +and will likely change in a future version. 
They aren't fully "baked" yet, so +they aren't documented here other than to tell you how to hook in provisioners. ## Caching Files -It is common for some builders to deal with very large files, or files that -take a long time to generate. For example, the VMware builder has the capability -to download the operating system ISO from the internet. This is timely process, -so it would be convenient to cache the file. This sort of caching is a core -part of Packer that is exposed to builders. +It is common for some builders to deal with very large files, or files that take +a long time to generate. For example, the VMware builder has the capability to +download the operating system ISO from the internet. This is timely process, so +it would be convenient to cache the file. This sort of caching is a core part of +Packer that is exposed to builders. The cache interface is `packer.Cache`. It behaves much like a Go -[RWMutex](http://golang.org/pkg/sync/#RWMutex). The builder requests a "lock" -on certain cache keys, and is given exclusive access to that key for the -duration of the lock. This locking mechanism allows multiple builders to -share cache data even though they're running in parallel. +[RWMutex](http://golang.org/pkg/sync/#RWMutex). The builder requests a "lock" on +certain cache keys, and is given exclusive access to that key for the duration +of the lock. This locking mechanism allows multiple builders to share cache data +even though they're running in parallel. For example, both the VMware and VirtualBox builders support downloading an operating system ISO from the internet. Most of the time, this ISO is identical. The locking mechanisms of the cache allow one of the builders to download it only once, but allow both builders to share the downloaded file. -The [documentation for packer.Cache](#) is -very detailed in how it works. +The [documentation for packer.Cache](#) is very detailed in how it works. 
diff --git a/website/source/docs/extend/command.html.markdown b/website/source/docs/extend/command.html.markdown index 1a4625c9b..2d611ebf4 100644 --- a/website/source/docs/extend/command.html.markdown +++ b/website/source/docs/extend/command.html.markdown @@ -1,57 +1,57 @@ --- -layout: "docs" -page_title: "Custom Command Development" -description: |- - Packer Commands are the components of Packer that add functionality to the `packer` application. Packer comes with a set of commands out of the box, such as `build`. Commands are invoked as `packer `. Custom commands allow you to add new commands to Packer to perhaps perform new functionality. ---- +description: | + Packer Commands are the components of Packer that add functionality to the + `packer` application. Packer comes with a set of commands out of the box, such + as `build`. Commands are invoked as `packer `. Custom commands allow + you to add new commands to Packer to perhaps perform new functionality. +layout: docs +page_title: Custom Command Development +... # Custom Command Development Packer Commands are the components of Packer that add functionality to the -`packer` application. Packer comes with a set of commands out of the -box, such as `build`. Commands are invoked as `packer `. -Custom commands allow you to add new commands to Packer to perhaps -perform new functionality. +`packer` application. Packer comes with a set of commands out of the box, such +as `build`. Commands are invoked as `packer `. Custom commands allow +you to add new commands to Packer to perhaps perform new functionality. -Prior to reading this page, it is assumed you have read the page on -[plugin development basics](/docs/extend/developing-plugins.html). +Prior to reading this page, it is assumed you have read the page on [plugin +development basics](/docs/extend/developing-plugins.html). -Command plugins implement the `packer.Command` interface and are served -using the `plugin.ServeCommand` function. 
Commands actually have no control -over what keyword invokes the command with the `packer` binary. The keyword -to invoke the command depends on how the plugin is installed and configured -in the core Packer configuration. +Command plugins implement the `packer.Command` interface and are served using +the `plugin.ServeCommand` function. Commands actually have no control over what +keyword invokes the command with the `packer` binary. The keyword to invoke the +command depends on how the plugin is installed and configured in the core Packer +configuration. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## The Interface The interface that must be implemented for a command is the `packer.Command` -interface. It is reproduced below for easy reference. The actual interface -in the source code contains some basic documentation as well explaining -what each method should do. +interface. It is reproduced below for easy reference. The actual interface in +the source code contains some basic documentation as well explaining what each +method should do. -```go +``` {.go} type Command interface { - Help() string - Run(env Environment, args []string) int - Synopsis() string + Help() string + Run(env Environment, args []string) int + Synopsis() string } ``` ### The "Help" Method -The `Help` method returns long-form help. This help is most commonly -shown when a command is invoked with the `--help` or `-h` option. -The help should document all the available command line flags, purpose -of the command, etc. +The `Help` method returns long-form help. This help is most commonly shown when +a command is invoked with the `--help` or `-h` option. The help should document +all the available command line flags, purpose of the command, etc. 
-Packer commands generally follow the following format for help, but -it is not required. You're allowed to make the help look like anything -you please. +Packer commands generally follow the following format for help, but it is not +required. You're allowed to make the help look like anything you please. -```text +``` {.text} Usage: packer COMMAND [options] ARGS... Brief one or two sentence about the function of the command. @@ -64,23 +64,23 @@ Options: ### The "Run" Method -`Run` is what is called when the command is actually invoked. It is given -the `packer.Environment`, which has access to almost all components of -the current Packer run, such as UI, builders, other plugins, etc. In addition -to the environment, the remaining command line args are given. These command -line args have already been stripped of the command name, so they can be -passed directly into something like the standard Go `flag` package for -command-line flag parsing. +`Run` is what is called when the command is actually invoked. It is given the +`packer.Environment`, which has access to almost all components of the current +Packer run, such as UI, builders, other plugins, etc. In addition to the +environment, the remaining command line args are given. These command line args +have already been stripped of the command name, so they can be passed directly +into something like the standard Go `flag` package for command-line flag +parsing. -The return value of `Run` is the exit status for the command. If everything -ran successfully, this should be 0. If any errors occurred, it should be any +The return value of `Run` is the exit status for the command. If everything ran +successfully, this should be 0. If any errors occurred, it should be any positive integer. ### The "Synopsis" Method -The `Synopsis` method should return a short single-line description -of what the command does. 
This is used when `packer` is invoked on its own -in order to show a brief summary of the commands that Packer supports. +The `Synopsis` method should return a short single-line description of what the +command does. This is used when `packer` is invoked on its own in order to show +a brief summary of the commands that Packer supports. -The synopsis should be no longer than around 50 characters, since it is -already appearing on a line with other text. +The synopsis should be no longer than around 50 characters, since it is already +appearing on a line with other text. diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown index 4e38c27a3..2ccdd437f 100644 --- a/website/source/docs/extend/developing-plugins.html.markdown +++ b/website/source/docs/extend/developing-plugins.html.markdown @@ -1,73 +1,75 @@ --- -layout: "docs" -page_title: "Developing Plugins" -description: |- - This page will document how you can develop your own Packer plugins. Prior to reading this, it is assumed that you're comfortable with Packer and also know the basics of how Plugins work, from a user standpoint. ---- +description: | + This page will document how you can develop your own Packer plugins. Prior to + reading this, it is assumed that you're comfortable with Packer and also know + the basics of how Plugins work, from a user standpoint. +layout: docs +page_title: Developing Plugins +... # Developing Plugins -This page will document how you can develop your own Packer plugins. -Prior to reading this, it is assumed that you're comfortable with Packer -and also know the [basics of how Plugins work](/docs/extend/plugins.html), -from a user standpoint. +This page will document how you can develop your own Packer plugins. Prior to +reading this, it is assumed that you're comfortable with Packer and also know +the [basics of how Plugins work](/docs/extend/plugins.html), from a user +standpoint. 
Packer plugins must be written in [Go](http://golang.org/), so it is also -assumed that you're familiar with the language. This page will not be a -Go language tutorial. Thankfully, if you are familiar with Go, the Go toolchain +assumed that you're familiar with the language. This page will not be a Go +language tutorial. Thankfully, if you are familiar with Go, the Go toolchain makes it extremely easy to develop Packer plugins. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## Plugin System Architecture Packer has a fairly unique plugin architecture. Instead of loading plugins -directly into a running application, Packer runs each plugin as a -_separate application_. Inter-process communication and RPC is then used -to communicate between the many running Packer processes. Packer core -itself is responsible for orchestrating the processes and handles cleanup. +directly into a running application, Packer runs each plugin as a *separate +application*. Inter-process communication and RPC is then used to communicate +between the many running Packer processes. Packer core itself is responsible for +orchestrating the processes and handles cleanup. The beauty of this is that your plugin can have any dependencies it wants. Dependencies don't need to line up with what Packer core or any other plugin -uses, because they're completely isolated into the process space of the -plugin itself. +uses, because they're completely isolated into the process space of the plugin +itself. -And, thanks to Go's [interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types), -it doesn't even look like inter-process communication is occurring. You just -use the interfaces like normal, but in fact they're being executed in -a remote process. Pretty cool. 
+And, thanks to Go's +[interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types), it +doesn't even look like inter-process communication is occurring. You just use +the interfaces like normal, but in fact they're being executed in a remote +process. Pretty cool. ## Plugin Development Basics -Developing a plugin is quite simple. All the various kinds of plugins -have a corresponding interface. The plugin simply needs to implement -this interface and expose it using the Packer plugin package (covered here shortly), -and that's it! +Developing a plugin is quite simple. All the various kinds of plugins have a +corresponding interface. The plugin simply needs to implement this interface and +expose it using the Packer plugin package (covered here shortly), and that's it! -There are two packages that really matter that every plugin must use. -Other than the following two packages, you're encouraged to use whatever -packages you want. Because plugins are their own processes, there is -no danger of colliding dependencies. +There are two packages that really matter that every plugin must use. Other than +the following two packages, you're encouraged to use whatever packages you want. +Because plugins are their own processes, there is no danger of colliding +dependencies. -* `github.com/mitchellh/packer` - Contains all the interfaces that you - have to implement for any given plugin. +- `github.com/mitchellh/packer` - Contains all the interfaces that you have to + implement for any given plugin. -* `github.com/mitchellh/packer/plugin` - Contains the code to serve the - plugin. This handles all the inter-process communication stuff. +- `github.com/mitchellh/packer/plugin` - Contains the code to serve the plugin. + This handles all the inter-process communication stuff. There are two steps involved in creating a plugin: -1. Implement the desired interface. For example, if you're building a - builder plugin, implement the `packer.Builder` interface. +1. 
Implement the desired interface. For example, if you're building a builder + plugin, implement the `packer.Builder` interface. -2. Serve the interface by calling the appropriate plugin serving method - in your main method. In the case of a builder, this is `plugin.ServeBuilder`. +2. Serve the interface by calling the appropriate plugin serving method in your + main method. In the case of a builder, this is `plugin.ServeBuilder`. A basic example is shown below. In this example, assume the `Builder` struct implements the `packer.Builder` interface: -```go +``` {.go} import ( "github.com/mitchellh/packer/plugin" ) @@ -76,40 +78,38 @@ import ( type Builder struct{} func main() { - plugin.ServeBuilder(new(Builder)) + plugin.ServeBuilder(new(Builder)) } ``` **That's it!** `plugin.ServeBuilder` handles all the nitty gritty of -communicating with Packer core and serving your builder over RPC. It -can't get much easier than that. +communicating with Packer core and serving your builder over RPC. It can't get +much easier than that. -Next, just build your plugin like a normal Go application, using `go build` -or however you please. The resulting binary is the plugin that can be -installed using standard installation procedures. +Next, just build your plugin like a normal Go application, using `go build` or +however you please. The resulting binary is the plugin that can be installed +using standard installation procedures. -The specifics of how to implement each type of interface are covered -in the relevant subsections available in the navigation to the left. +The specifics of how to implement each type of interface are covered in the +relevant subsections available in the navigation to the left. -~> **Lock your dependencies!** Unfortunately, Go's dependency -management story is fairly sad. 
There are various unofficial methods out -there for locking dependencies, and using one of them is highly recommended -since the Packer codebase will continue to improve, potentially breaking -APIs along the way until there is a stable release. By locking your dependencies, -your plugins will continue to work with the version of Packer you lock to. +\~> **Lock your dependencies!** Unfortunately, Go's dependency management +story is fairly sad. There are various unofficial methods out there for locking +dependencies, and using one of them is highly recommended since the Packer +codebase will continue to improve, potentially breaking APIs along the way until +there is a stable release. By locking your dependencies, your plugins will +continue to work with the version of Packer you lock to. ## Logging and Debugging -Plugins can use the standard Go `log` package to log. Anything logged -using this will be available in the Packer log files automatically. -The Packer log is visible on stderr when the `PACKER_LOG` environmental -is set. +Plugins can use the standard Go `log` package to log. Anything logged using this +will be available in the Packer log files automatically. The Packer log is +visible on stderr when the `PACKER_LOG` environmental is set. -Packer will prefix any logs from plugins with the path to that plugin -to make it identifiable where the logs come from. Some example logs are -shown below: +Packer will prefix any logs from plugins with the path to that plugin to make it +identifiable where the logs come from. 
Some example logs are shown below: -```text +``` {.text} 2013/06/10 21:44:43 ui: Available commands are: 2013/06/10 21:44:43 Loading command: build 2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin minimum port: 10000 @@ -117,31 +117,31 @@ shown below: 2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin address: :10000 ``` -As you can see, the log messages from the "build" command plugin are -prefixed with "packer-command-build". Log output is _extremely_ helpful -in debugging issues and you're encouraged to be as verbose as you need to -be in order for the logs to be helpful. +As you can see, the log messages from the "build" command plugin are prefixed +with "packer-command-build". Log output is *extremely* helpful in debugging +issues and you're encouraged to be as verbose as you need to be in order for the +logs to be helpful. ## Plugin Development Tips -Here are some tips for developing plugins, often answering common questions -or concerns. +Here are some tips for developing plugins, often answering common questions or +concerns. ### Naming Conventions -It is standard practice to name the resulting plugin application -in the format of `packer-TYPE-NAME`. For example, if you're building a -new builder for CustomCloud, it would be standard practice to name the -resulting plugin `packer-builder-custom-cloud`. This naming convention -helps users identify the purpose of a plugin. +It is standard practice to name the resulting plugin application in the format +of `packer-TYPE-NAME`. For example, if you're building a new builder for +CustomCloud, it would be standard practice to name the resulting plugin +`packer-builder-custom-cloud`. This naming convention helps users identify the +purpose of a plugin. ### Testing Plugins -While developing plugins, you can configure your Packer configuration -to point directly to the compiled plugin in order to test it. 
For example, -building the CustomCloud plugin, I may configure packer like so: +While developing plugins, you can configure your Packer configuration to point +directly to the compiled plugin in order to test it. For example, building the +CustomCloud plugin, I may configure packer like so: -```javascript +``` {.javascript} { "builders": { "custom-cloud": "/an/absolute/path/to/packer-builder-custom-cloud" @@ -149,13 +149,13 @@ building the CustomCloud plugin, I may configure packer like so: } ``` -This would configure Packer to have the "custom-cloud" plugin, and execute -the binary that I am building during development. This is extremely useful -during development. +This would configure Packer to have the "custom-cloud" plugin, and execute the +binary that I am building during development. This is extremely useful during +development. ### Distributing Plugins -It is recommended you use a tool like [goxc](https://github.com/laher/goxc) -in order to cross-compile your plugin for every platform that Packer supports, -since Go applications are platform-specific. goxc will allow you to build -for every platform from your own computer. +It is recommended you use a tool like [goxc](https://github.com/laher/goxc) in +order to cross-compile your plugin for every platform that Packer supports, +since Go applications are platform-specific. goxc will allow you to build for +every platform from your own computer. diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown index c257fb702..f8b800a30 100644 --- a/website/source/docs/extend/plugins.html.markdown +++ b/website/source/docs/extend/plugins.html.markdown @@ -1,68 +1,71 @@ --- -layout: "docs" -page_title: "Packer Plugins - Extend Packer" -description: |- - Packer Plugins allow new functionality to be added to Packer without modifying the core source code. Packer plugins are able to add new commands, builders, provisioners, hooks, and more. 
In fact, much of Packer itself is implemented by writing plugins that are simply distributed with Packer. For example, all the commands, builders, provisioners, and more that ship with Packer are implemented as Plugins that are simply hardcoded to load with Packer. ---- +description: | + Packer Plugins allow new functionality to be added to Packer without modifying + the core source code. Packer plugins are able to add new commands, builders, + provisioners, hooks, and more. In fact, much of Packer itself is implemented by + writing plugins that are simply distributed with Packer. For example, all the + commands, builders, provisioners, and more that ship with Packer are implemented + as Plugins that are simply hardcoded to load with Packer. +layout: docs +page_title: 'Packer Plugins - Extend Packer' +... # Packer Plugins -Packer Plugins allow new functionality to be added to Packer without -modifying the core source code. Packer plugins are able to add new -commands, builders, provisioners, hooks, and more. In fact, much of Packer -itself is implemented by writing plugins that are simply distributed with -Packer. For example, all the commands, builders, provisioners, and more -that ship with Packer are implemented as Plugins that are simply hardcoded -to load with Packer. +Packer Plugins allow new functionality to be added to Packer without modifying +the core source code. Packer plugins are able to add new commands, builders, +provisioners, hooks, and more. In fact, much of Packer itself is implemented by +writing plugins that are simply distributed with Packer. For example, all the +commands, builders, provisioners, and more that ship with Packer are implemented +as Plugins that are simply hardcoded to load with Packer. -This page will cover how to install and use plugins. If you're interested -in developing plugins, the documentation for that is available the -[developing plugins](/docs/extend/developing-plugins.html) page. 
+This page will cover how to install and use plugins. If you're interested in +developing plugins, the documentation for that is available the [developing +plugins](/docs/extend/developing-plugins.html) page. -Because Packer is so young, there is no official listing of available -Packer plugins. Plugins are best found via Google. Typically, searching -"packer plugin _x_" will find what you're looking for if it exists. As -Packer gets older, an official plugin directory is planned. +Because Packer is so young, there is no official listing of available Packer +plugins. Plugins are best found via Google. Typically, searching "packer plugin +*x*" will find what you're looking for if it exists. As Packer gets older, an +official plugin directory is planned. ## How Plugins Work -Packer plugins are completely separate, standalone applications that the -core of Packer starts and communicates with. +Packer plugins are completely separate, standalone applications that the core of +Packer starts and communicates with. -These plugin applications aren't meant to be run manually. Instead, Packer core executes -these plugin applications in a certain way and communicates with them. +These plugin applications aren't meant to be run manually. Instead, Packer core +executes these plugin applications in a certain way and communicates with them. For example, the VMware builder is actually a standalone binary named -`packer-builder-vmware`. The next time you run a Packer build, look at -your process list and you should see a handful of `packer-` prefixed -applications running. +`packer-builder-vmware`. The next time you run a Packer build, look at your +process list and you should see a handful of `packer-` prefixed applications +running. ## Installing Plugins -The easiest way to install a plugin is to name it correctly, then place -it in the proper directory. To name a plugin correctly, make sure the -binary is named `packer-TYPE-NAME`. 
For example, `packer-builder-amazon-ebs` -for a "builder" type plugin named "amazon-ebs". Valid types for plugins -are down this page more. +The easiest way to install a plugin is to name it correctly, then place it in +the proper directory. To name a plugin correctly, make sure the binary is named +`packer-TYPE-NAME`. For example, `packer-builder-amazon-ebs` for a "builder" +type plugin named "amazon-ebs". Valid types for plugins are down this page more. -Once the plugin is named properly, Packer automatically discovers plugins -in the following directories in the given order. If a conflicting plugin is -found later, it will take precedence over one found earlier. +Once the plugin is named properly, Packer automatically discovers plugins in the +following directories in the given order. If a conflicting plugin is found +later, it will take precedence over one found earlier. -1. The directory where `packer` is, or the executable directory. +1. The directory where `packer` is, or the executable directory. -2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on - Windows. +2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` + on Windows. -3. The current working directory. +3. The current working directory. The valid types for plugins are: -* `builder` - Plugins responsible for building images for a specific platform. +- `builder` - Plugins responsible for building images for a specific platform. -* `command` - A CLI sub-command for `packer`. +- `command` - A CLI sub-command for `packer`. -* `post-processor` - A post-processor responsible for taking an artifact - from a builder and turning it into something else. +- `post-processor` - A post-processor responsible for taking an artifact from a + builder and turning it into something else. -* `provisioner` - A provisioner to install software on images created by - a builder. +- `provisioner` - A provisioner to install software on images created by + a builder. 
diff --git a/website/source/docs/extend/post-processor.html.markdown b/website/source/docs/extend/post-processor.html.markdown index 204cc593b..1120bc31d 100644 --- a/website/source/docs/extend/post-processor.html.markdown +++ b/website/source/docs/extend/post-processor.html.markdown @@ -1,92 +1,89 @@ --- -layout: "docs" -page_title: "Custom Post-Processor Development" -description: |- - Packer Post-processors are the components of Packer that transform one artifact into another, for example by compressing files, or uploading them. ---- +description: | + Packer Post-processors are the components of Packer that transform one artifact + into another, for example by compressing files, or uploading them. +layout: docs +page_title: 'Custom Post-Processor Development' +... # Custom Post-Processor Development Packer Post-processors are the components of Packer that transform one artifact into another, for example by compressing files, or uploading them. -In the compression example, the transformation would be taking an artifact -with a set of files, compressing those files, and returning a new -artifact with only a single file (the compressed archive). For the -upload example, the transformation would be taking an artifact with -some set of files, uploading those files, and returning an artifact -with a single ID: the URL of the upload. +In the compression example, the transformation would be taking an artifact with +a set of files, compressing those files, and returning a new artifact with only +a single file (the compressed archive). For the upload example, the +transformation would be taking an artifact with some set of files, uploading +those files, and returning an artifact with a single ID: the URL of the upload. -Prior to reading this page, it is assumed you have read the page on -[plugin development basics](/docs/extend/developing-plugins.html). 
+Prior to reading this page, it is assumed you have read the page on [plugin +development basics](/docs/extend/developing-plugins.html). -Post-processor plugins implement the `packer.PostProcessor` interface and -are served using the `plugin.ServePostProcessor` function. +Post-processor plugins implement the `packer.PostProcessor` interface and are +served using the `plugin.ServePostProcessor` function. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. - ## The Interface The interface that must be implemented for a post-processor is the -`packer.PostProcessor` interface. It is reproduced below for easy reference. -The actual interface in the source code contains some basic documentation as well explaining -what each method should do. +`packer.PostProcessor` interface. It is reproduced below for easy reference. The +actual interface in the source code contains some basic documentation as well +explaining what each method should do. -```go +``` {.go} type PostProcessor interface { - Configure(interface{}) error - PostProcess(Ui, Artifact) (a Artifact, keep bool, err error) + Configure(interface{}) error + PostProcess(Ui, Artifact) (a Artifact, keep bool, err error) } ``` ### The "Configure" Method -The `Configure` method for each post-processor is called early in the -build process to configure the post-processor. The configuration is passed -in as a raw `interface{}`. The configure method is responsible for translating -this configuration into an internal structure, validating it, and returning -any errors. +The `Configure` method for each post-processor is called early in the build +process to configure the post-processor. The configuration is passed in as a raw +`interface{}`. 
The configure method is responsible for translating this +configuration into an internal structure, validating it, and returning any +errors. For decoding the `interface{}` into a meaningful structure, the [mapstructure](https://github.com/mitchellh/mapstructure) library is recommended. Mapstructure will take an `interface{}` and decode it into an arbitrarily complex struct. If there are any errors, it generates very -human-friendly errors that can be returned directly from the configure -method. +human-friendly errors that can be returned directly from the configure method. -While it is not actively enforced, **no side effects** should occur from -running the `Configure` method. Specifically, don't create files, don't -create network connections, etc. Configure's purpose is solely to setup -internal state and validate the configuration as much as possible. +While it is not actively enforced, **no side effects** should occur from running +the `Configure` method. Specifically, don't create files, don't create network +connections, etc. Configure's purpose is solely to setup internal state and +validate the configuration as much as possible. -`Configure` being run is not an indication that `PostProcess` will ever -run. For example, `packer validate` will run `Configure` to verify the -configuration validates, but will never actually run the build. +`Configure` being run is not an indication that `PostProcess` will ever run. For +example, `packer validate` will run `Configure` to verify the configuration +validates, but will never actually run the build. ### The "PostProcess" Method -The `PostProcess` method is where the real work goes. PostProcess is -responsible for taking one `packer.Artifact` implementation, and transforming -it into another. +The `PostProcess` method is where the real work goes. PostProcess is responsible +for taking one `packer.Artifact` implementation, and transforming it into +another. 
When we say "transform," we don't mean actually modifying the existing -`packer.Artifact` value itself. We mean taking the contents of the artifact -and creating a new artifact from that. For example, if we were creating -a "compress" post-processor that is responsible for compressing files, -the transformation would be taking the `Files()` from the original artifact, -compressing them, and creating a new artifact with a single file: the -compressed archive. +`packer.Artifact` value itself. We mean taking the contents of the artifact and +creating a new artifact from that. For example, if we were creating a "compress" +post-processor that is responsible for compressing files, the transformation +would be taking the `Files()` from the original artifact, compressing them, and +creating a new artifact with a single file: the compressed archive. -The result signature of this method is `(Artifact, bool, error)`. Each -return value is explained below: +The result signature of this method is `(Artifact, bool, error)`. Each return +value is explained below: -* `Artifact` - The newly created artifact if no errors occurred. -* `bool` - If true, the input artifact will forcefully be kept. By default, +- `Artifact` - The newly created artifact if no errors occurred. +- `bool` - If true, the input artifact will forcefully be kept. By default, Packer typically deletes all input artifacts, since the user doesn't generally want intermediary artifacts. However, some post-processors depend on the previous artifact existing. If this is `true`, it forces packer to keep the artifact around. -* `error` - Non-nil if there was an error in any way. If this is the case, - the other two return values are ignored. +- `error` - Non-nil if there was an error in any way. If this is the case, the + other two return values are ignored. 
diff --git a/website/source/docs/extend/provisioner.html.markdown b/website/source/docs/extend/provisioner.html.markdown index cb73cccd2..a06940dac 100644 --- a/website/source/docs/extend/provisioner.html.markdown +++ b/website/source/docs/extend/provisioner.html.markdown @@ -1,90 +1,95 @@ --- -layout: "docs" -page_title: "Custom Provisioner Development" -description: |- - Packer Provisioners are the components of Packer that install and configure software into a running machine prior to turning that machine into an image. An example of a provisioner is the shell provisioner, which runs shell scripts within the machines. ---- +description: | + Packer Provisioners are the components of Packer that install and configure + software into a running machine prior to turning that machine into an image. An + example of a provisioner is the shell provisioner, which runs shell scripts + within the machines. +layout: docs +page_title: Custom Provisioner Development +... # Custom Provisioner Development Packer Provisioners are the components of Packer that install and configure -software into a running machine prior to turning that machine into an -image. An example of a provisioner is the [shell provisioner](/docs/provisioners/shell.html), -which runs shell scripts within the machines. +software into a running machine prior to turning that machine into an image. An +example of a provisioner is the [shell +provisioner](/docs/provisioners/shell.html), which runs shell scripts within the +machines. -Prior to reading this page, it is assumed you have read the page on -[plugin development basics](/docs/extend/developing-plugins.html). +Prior to reading this page, it is assumed you have read the page on [plugin +development basics](/docs/extend/developing-plugins.html). -Provisioner plugins implement the `packer.Provisioner` interface and -are served using the `plugin.ServeProvisioner` function. 
+Provisioner plugins implement the `packer.Provisioner` interface and are served +using the `plugin.ServeProvisioner` function. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## The Interface The interface that must be implemented for a provisioner is the -`packer.Provisioner` interface. It is reproduced below for easy reference. -The actual interface in the source code contains some basic documentation as well explaining -what each method should do. +`packer.Provisioner` interface. It is reproduced below for easy reference. The +actual interface in the source code contains some basic documentation as well +explaining what each method should do. -```go +``` {.go} type Provisioner interface { - Prepare(...interface{}) error - Provision(Ui, Communicator) error + Prepare(...interface{}) error + Provision(Ui, Communicator) error } ``` ### The "Prepare" Method -The `Prepare` method for each provisioner is called prior to any runs with -the configuration that was given in the template. This is passed in as -an array of `interface{}` types, but is generally `map[string]interface{}`. The prepare +The `Prepare` method for each provisioner is called prior to any runs with the +configuration that was given in the template. This is passed in as an array of +`interface{}` types, but is generally `map[string]interface{}`. The prepare method is responsible for translating this configuration into an internal structure, validating it, and returning any errors. For multiple parameters, they should be merged together into the final -configuration, with later parameters overwriting any previous configuration. -The exact semantics of the merge are left to the builder author. +configuration, with later parameters overwriting any previous configuration. 
The +exact semantics of the merge are left to the builder author. For decoding the `interface{}` into a meaningful structure, the -[mapstructure](https://github.com/mitchellh/mapstructure) library is recommended. -Mapstructure will take an `interface{}` and decode it into an arbitrarily -complex struct. If there are any errors, it generates very human friendly -errors that can be returned directly from the prepare method. +[mapstructure](https://github.com/mitchellh/mapstructure) library is +recommended. Mapstructure will take an `interface{}` and decode it into an +arbitrarily complex struct. If there are any errors, it generates very human +friendly errors that can be returned directly from the prepare method. -While it is not actively enforced, **no side effects** should occur from -running the `Prepare` method. Specifically, don't create files, don't launch -virtual machines, etc. Prepare's purpose is solely to configure the builder -and validate the configuration. +While it is not actively enforced, **no side effects** should occur from running +the `Prepare` method. Specifically, don't create files, don't launch virtual +machines, etc. Prepare's purpose is solely to configure the builder and validate +the configuration. -The `Prepare` method is called very early in the build process so that -errors may be displayed to the user before anything actually happens. +The `Prepare` method is called very early in the build process so that errors +may be displayed to the user before anything actually happens. ### The "Provision" Method -The `Provision` method is called when a machine is running and ready -to be provisioned. The provisioner should do its real work here. +The `Provision` method is called when a machine is running and ready to be +provisioned. The provisioner should do its real work here. -The method takes two parameters: a `packer.Ui` and a `packer.Communicator`. -The UI can be used to communicate with the user what is going on. 
The -communicator is used to communicate with the running machine, and is -guaranteed to be connected at this point. +The method takes two parameters: a `packer.Ui` and a `packer.Communicator`. The +UI can be used to communicate with the user what is going on. The communicator +is used to communicate with the running machine, and is guaranteed to be +connected at this point. The provision method should not return until provisioning is complete. ## Using the Communicator -The `packer.Communicator` parameter and interface is used to communicate -with running machine. The machine may be local (in a virtual machine or -container of some sort) or it may be remote (in a cloud). The communicator -interface abstracts this away so that communication is the same overall. +The `packer.Communicator` parameter and interface is used to communicate with +running machine. The machine may be local (in a virtual machine or container of +some sort) or it may be remote (in a cloud). The communicator interface +abstracts this away so that communication is the same overall. -The documentation around the [code itself](https://github.com/mitchellh/packer/blob/master/packer/communicator.go) -is really great as an overview of how to use the interface. You should begin -by reading this. Once you have read it, you can see some example usage below: +The documentation around the [code +itself](https://github.com/mitchellh/packer/blob/master/packer/communicator.go) +is really great as an overview of how to use the interface. You should begin by +reading this. Once you have read it, you can see some example usage below: -```go +``` {.go} // Build the remote command. 
var cmd packer.RemoteCmd cmd.Command = "echo foo" diff --git a/website/source/docs/index.html.markdown b/website/source/docs/index.html.markdown index 5894d17db..cf924d688 100644 --- a/website/source/docs/index.html.markdown +++ b/website/source/docs/index.html.markdown @@ -1,13 +1,16 @@ --- -layout: "docs" -page_title: "Packer Documentation" -description: |- - Welcome to the Packer documentation! This documentation is more of a reference guide for all available features and options in Packer. If you're just getting started with Packer, please start with the introduction and getting started guide instead. ---- +description: | + Welcome to the Packer documentation! This documentation is more of a reference + guide for all available features and options in Packer. If you're just getting + started with Packer, please start with the introduction and getting started + guide instead. +layout: docs +page_title: Packer Documentation +... # Packer Documentation Welcome to the Packer documentation! This documentation is more of a reference guide for all available features and options in Packer. If you're just getting -started with Packer, please start with the -[introduction and getting started guide](/intro) instead. +started with Packer, please start with the [introduction and getting started +guide](/intro) instead. diff --git a/website/source/docs/installation.html.markdown b/website/source/docs/installation.html.markdown index b24078729..35af3ed93 100644 --- a/website/source/docs/installation.html.markdown +++ b/website/source/docs/installation.html.markdown @@ -1,44 +1,48 @@ --- -layout: "docs" -page_title: "Install Packer" -description: |- - Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a binary package for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the README and is only recommended for advanced users. 
---- +description: | + Packer must first be installed on the machine you want to run it on. To make + installation easy, Packer is distributed as a binary package for all supported + platforms and architectures. This page will not cover how to compile Packer from + source, as that is covered in the README and is only recommended for advanced + users. +layout: docs +page_title: Install Packer +... # Install Packer -Packer must first be installed on the machine you want to run it on. -To make installation easy, Packer is distributed as a [binary package](/downloads.html) -for all supported platforms and architectures. This page will not cover how -to compile Packer from source, as that is covered in the +Packer must first be installed on the machine you want to run it on. To make +installation easy, Packer is distributed as a [binary package](/downloads.html) +for all supported platforms and architectures. This page will not cover how to +compile Packer from source, as that is covered in the [README](https://github.com/mitchellh/packer/blob/master/README.md) and is only recommended for advanced users. ## Installing Packer -To install packer, first find the [appropriate package](/downloads.html) -for your system and download it. Packer is packaged as a "zip" file. +To install packer, first find the [appropriate package](/downloads.html) for +your system and download it. Packer is packaged as a "zip" file. Next, unzip the downloaded package into a directory where Packer will be installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good, -depending on whether you want to restrict the install to just your user -or install it system-wide. On Windows systems, you can put it wherever you'd -like. +depending on whether you want to restrict the install to just your user or +install it system-wide. On Windows systems, you can put it wherever you'd like. 
After unzipping the package, the directory should contain a set of binary -programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step -to installation is to make sure the directory you installed Packer to -is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) -for instructions on setting the PATH on Linux and Mac. -[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) +programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step to +installation is to make sure the directory you installed Packer to is on the +PATH. See [this +page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) +for instructions on setting the PATH on Linux and Mac. [This +page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) contains instructions for setting the PATH on Windows. ## Verifying the Installation -After installing Packer, verify the installation worked by opening -a new command prompt or console, and checking that `packer` is available: +After installing Packer, verify the installation worked by opening a new command +prompt or console, and checking that `packer` is available: -```text +``` {.text} $ packer usage: packer [--version] [--help] [] @@ -50,8 +54,8 @@ Available commands are: ``` If you get an error that `packer` could not be found, then your PATH -environmental variable was not setup properly. Please go back and ensure -that your PATH variable contains the directory which has Packer installed. +environmental variable was not setup properly. Please go back and ensure that +your PATH variable contains the directory which has Packer installed. Otherwise, Packer is installed and you're ready to go! @@ -59,24 +63,24 @@ Otherwise, Packer is installed and you're ready to go! Installation from binary packages is currently the only officially supported installation method. 
The binary packages are guaranteed to be the latest -available version and match the proper checksums. However, in addition to -the official binaries, there are other unofficial 3rd party methods of -installation managed by the Packer community: +available version and match the proper checksums. However, in addition to the +official binaries, there are other unofficial 3rd party methods of installation +managed by the Packer community: ### Homebrew If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: -```text +``` {.text} $ brew install packer ``` ### Chocolatey -If you're using Windows and [Chocolatey](http://chocolatey.org), you can install Packer from -Windows command line (cmd). Remember that this is updated by a 3rd party, so -it may not be the latest available version. +If you're using Windows and [Chocolatey](http://chocolatey.org), you can install +Packer from Windows command line (cmd). Remember that this is updated by a 3rd +party, so it may not be the latest available version. -```text +``` {.text} $ choco install packer ``` diff --git a/website/source/docs/machine-readable/command-build.html.markdown b/website/source/docs/machine-readable/command-build.html.markdown index 3320f43f1..7472b7bfc 100644 --- a/website/source/docs/machine-readable/command-build.html.markdown +++ b/website/source/docs/machine-readable/command-build.html.markdown @@ -1,163 +1,165 @@ --- -layout: "docs_machine_readable" -page_title: "Command: build - Machine-Readable Reference" -description: |- - These are the machine-readable types that exist as part of the output of `packer build`. ---- +description: | + These are the machine-readable types that exist as part of the output of + `packer build`. +layout: 'docs\_machine\_readable' +page_title: 'Command: build - Machine-Readable Reference' +... # Build Command Types -These are the machine-readable types that exist as part of the output -of `packer build`. 
+These are the machine-readable types that exist as part of the output of +`packer build`.
    -
    artifact (>= 2)
    -
    -

    - Information about an artifact of the targeted item. This is a - fairly complex (but uniform!) machine-readable type that contains - subtypes. The subtypes are documented within this page in the - syntax of "artifact subtype: SUBTYPE". The number of arguments within - that subtype is in addition to the artifact args. -

    +
    artifact (>= 2)
    +
    +

    + Information about an artifact of the targeted item. This is a + fairly complex (but uniform!) machine-readable type that contains + subtypes. The subtypes are documented within this page in the + syntax of "artifact subtype: SUBTYPE". The number of arguments within + that subtype is in addition to the artifact args. +

    -

    - Data 1: index - The zero-based index of the - artifact being described. This goes up to "artifact-count" (see - below). -

    -

    - Data 2: subtype - The subtype that describes - the remaining arguments. See the documentation for the - subtype docs throughout this page. -

    -

    - Data 3..n: subtype data - Zero or more additional - data points related to the subtype. The exact count and meaning - of this subtypes comes from the subtype documentation. -

    -
    +

    + Data 1: index - The zero-based index of the + artifact being described. This goes up to "artifact-count" (see + below). +

    +

    + Data 2: subtype - The subtype that describes + the remaining arguments. See the documentation for the + subtype docs throughout this page. +

    +

    + Data 3..n: subtype data - Zero or more additional + data points related to the subtype. The exact count and meaning + of this subtypes comes from the subtype documentation. +

    + -
    artifact-count (1)
    -
    -

    - The number of artifacts associated with the given target. This - will always be outputted _before_ any other artifact information, - so you're able to know how many upcoming artifacts to look for. -

    +
    artifact-count (1)
    +
    +

    + The number of artifacts associated with the given target. This + will always be outputted _before_ any other artifact information, + so you're able to know how many upcoming artifacts to look for. +

    -

    - Data 1: count - The number of artifacts as - a base 10 integer. -

    -
    +

    + Data 1: count - The number of artifacts as + a base 10 integer. +

    + -
    artifact subtype: builder-id (1)
    -
    -

    - The unique ID of the builder that created this artifact. -

    +
    artifact subtype: builder-id (1)
    +
    +

    + The unique ID of the builder that created this artifact. +

    -

    - Data 1: id - The unique ID of the builder. -

    -
    +

    + Data 1: id - The unique ID of the builder. +

    + -
    artifact subtype: end (0)
    -
    -

    - The last machine-readable output line outputted for an artifact. - This is a sentinel value so you know that no more data related to - the targetted artifact will be outputted. -

    -
    +
    artifact subtype: end (0)
    +
    +

    + The last machine-readable output line outputted for an artifact. + This is a sentinel value so you know that no more data related to + the targetted artifact will be outputted. +

    +
    -
    artifact subtype: file (2)
    -
    -

    - A single file associated with the artifact. There are 0 to - "files-count" of these entries to describe every file that is - part of the artifact. -

    +
    artifact subtype: file (2)
    +
    +

    + A single file associated with the artifact. There are 0 to + "files-count" of these entries to describe every file that is + part of the artifact. +

    -

    - Data 1: index - Zero-based index of the file. - This goes from 0 to "files-count" minus one. -

    +

    + Data 1: index - Zero-based index of the file. + This goes from 0 to "files-count" minus one. +

    -

    - Data 2: filename - The filename. -

    -
    +

    + Data 2: filename - The filename. +

    + -
    artifact subtype: files-count (1)
    -
    -

    - The number of files associated with this artifact. Not all - artifacts have files associated with it. -

    +
    artifact subtype: files-count (1)
    +
    +

    + The number of files associated with this artifact. Not all + artifacts have files associated with it. +

    -

    - Data 1: count - The number of files. -

    -
    +

    + Data 1: count - The number of files. +

    + -
    artifact subtype: id (1)
    -
    -

    - The ID (if any) of the artifact that was built. Not all artifacts - have associated IDs. For example, AMIs built have IDs associated - with them, but VirtualBox images do not. The exact format of the ID - is specific to the builder. -

    +
    artifact subtype: id (1)
    +
    +

    + The ID (if any) of the artifact that was built. Not all artifacts + have associated IDs. For example, AMIs built have IDs associated + with them, but VirtualBox images do not. The exact format of the ID + is specific to the builder. +

    -

    - Data 1: id - The ID of the artifact. -

    -
    +

    + Data 1: id - The ID of the artifact. +

    + -
    artifact subtype: nil (0)
    -
    -

    - If present, this means that the artifact was nil, or that the targeted - build completed successfully but no artifact was created. -

    -
    +
    artifact subtype: nil (0)
    +
    +

    + If present, this means that the artifact was nil, or that the targeted + build completed successfully but no artifact was created. +

    +
    -
    artifact subtype: string (1)
    -
    -

    - The human-readable string description of the artifact provided by - the artifact itself. -

    +
    artifact subtype: string (1)
    +
    +

    + The human-readable string description of the artifact provided by + the artifact itself. +

    -

    - Data 1: string - The string output for the artifact. -

    -
    +

    + Data 1: string - The string output for the artifact. +

    + -
    error-count (1)
    -
    -

    - The number of errors that occurred during the build. This will - always be outputted before any errors so you know how many are coming. -

    +
    error-count (1)
    +
    +

    + The number of errors that occurred during the build. This will + always be outputted before any errors so you know how many are coming. +

    -

    - Data 1: count - The number of build errors as - a base 10 integer. -

    -
    +

    + Data 1: count - The number of build errors as + a base 10 integer. +

    + -
    error (1)
    -
    -

    - A build error that occurred. The target of this output will be - the build that had the error. -

    +
    error (1)
    +
    +

    + A build error that occurred. The target of this output will be + the build that had the error. +

    + +

    + Data 1: error - The error message as a string. +

    +
    -

    - Data 1: error - The error message as a string. -

    -
    diff --git a/website/source/docs/machine-readable/command-inspect.html.markdown b/website/source/docs/machine-readable/command-inspect.html.markdown index 3f8bbb852..4a5d68876 100644 --- a/website/source/docs/machine-readable/command-inspect.html.markdown +++ b/website/source/docs/machine-readable/command-inspect.html.markdown @@ -1,63 +1,65 @@ --- -layout: "docs_machine_readable" -page_title: "Command: inspect - Machine-Readable Reference" -description: |- - These are the machine-readable types that exist as part of the output of `packer inspect`. ---- +description: | + These are the machine-readable types that exist as part of the output of + `packer inspect`. +layout: 'docs\_machine\_readable' +page_title: 'Command: inspect - Machine-Readable Reference' +... # Inspect Command Types -These are the machine-readable types that exist as part of the output -of `packer inspect`. +These are the machine-readable types that exist as part of the output of +`packer inspect`.
    -
    template-variable (3)
    -
    -

    - A user variable - defined within the template. -

    +
    template-variable (3)
    +
    +

    + A user variable + defined within the template. +

    -

    - Data 1: name - Name of the variable. -

    +

    + Data 1: name - Name of the variable. +

    -

    - Data 2: default - The default value of the - variable. -

    +

    + Data 2: default - The default value of the + variable. +

    -

    - Data 3: required - If non-zero, then this variable - is required. -

    -
    +

    + Data 3: required - If non-zero, then this variable + is required. +

    + -
    template-builder (2)
    -
    -

    - A builder defined within the template -

    +
    template-builder (2)
    +
    +

    + A builder defined within the template +

    -

    - Data 1: name - The name of the builder. -

    -

    - Data 2: type - The type of the builder. This will - generally be the same as the name unless you explicitly override - the name. -

    -
    +

    + Data 1: name - The name of the builder. +

    +

    + Data 2: type - The type of the builder. This will + generally be the same as the name unless you explicitly override + the name. +

    + -
    template-provisioner (1)
    -
    -

    - A provisioner defined within the template. Multiple of these may - exist. If so, they are outputted in the order they would run. -

    +
    template-provisioner (1)
    +
    +

    + A provisioner defined within the template. Multiple of these may + exist. If so, they are outputted in the order they would run. +

    + +

    + Data 1: name - The name/type of the provisioner. +

    +
    -

    - Data 1: name - The name/type of the provisioner. -

    -
    diff --git a/website/source/docs/machine-readable/command-version.html.markdown b/website/source/docs/machine-readable/command-version.html.markdown index a7029b627..8b32b2540 100644 --- a/website/source/docs/machine-readable/command-version.html.markdown +++ b/website/source/docs/machine-readable/command-version.html.markdown @@ -1,47 +1,49 @@ --- -layout: "docs_machine_readable" -page_title: "Command: version - Machine-Readable Reference" -description: |- - These are the machine-readable types that exist as part of the output of `packer version`. ---- +description: | + These are the machine-readable types that exist as part of the output of + `packer version`. +layout: 'docs\_machine\_readable' +page_title: 'Command: version - Machine-Readable Reference' +... # Version Command Types -These are the machine-readable types that exist as part of the output -of `packer version`. +These are the machine-readable types that exist as part of the output of +`packer version`.
    -
    version (1)
    -
    -

    The version number of Packer running.

    +
    version (1)
    +
    +

    The version number of Packer running.

    -

    - Data 1: version - The version of Packer running, - only including the major, minor, and patch versions. Example: - "0.2.4". -

    -
    +

    + Data 1: version - The version of Packer running, + only including the major, minor, and patch versions. Example: + "0.2.4". +

    + -
    version-commit (1)
    -
    -

    The SHA1 of the Git commit that built this version of Packer.

    +
    version-commit (1)
    +
    +

    The SHA1 of the Git commit that built this version of Packer.

    -

    - Data 1: commit SHA1 - The SHA1 of the commit. -

    -
    +

    + Data 1: commit SHA1 - The SHA1 of the commit. +

    + -
    version-prerelease (1)
    -
    -

    - The prerelease tag (if any) for the running version of Packer. This - can be "beta", "dev", "alpha", etc. If this is empty, you can assume - it is a release version running. -

    +
    version-prerelease (1)
    +
    +

    + The prerelease tag (if any) for the running version of Packer. This + can be "beta", "dev", "alpha", etc. If this is empty, you can assume + it is a release version running. +

    + +

    + Data 1: prerelease name - The name of the + prerelease tag. +

    +
    -

    - Data 1: prerelease name - The name of the - prerelease tag. -

    -
    diff --git a/website/source/docs/machine-readable/general.html.markdown b/website/source/docs/machine-readable/general.html.markdown index 1f08be4d2..b29ae053f 100644 --- a/website/source/docs/machine-readable/general.html.markdown +++ b/website/source/docs/machine-readable/general.html.markdown @@ -1,9 +1,10 @@ --- -layout: "docs_machine_readable" -page_title: "General Types - Machine-Readable Reference" -description: |- - These are the machine-readable types that can appear in almost any machine-readable output and are provided by Packer core itself. ---- +description: | + These are the machine-readable types that can appear in almost any + machine-readable output and are provided by Packer core itself. +layout: 'docs\_machine\_readable' +page_title: 'General Types - Machine-Readable Reference' +... # General Types @@ -11,21 +12,22 @@ These are the machine-readable types that can appear in almost any machine-readable output and are provided by Packer core itself.
    -
    ui (2)
    -
    -

    - Specifies the output and type of output that would've normally - gone to the console if Packer were running in human-readable - mode. -

    +
    ui (2)
    +
    +

    + Specifies the output and type of output that would've normally + gone to the console if Packer were running in human-readable + mode. +

    + +

    + Data 1: type - The type of UI message that would've + been outputted. Can be "say", "message", or "error". +

    +

    + Data 2: output - The UI message that would have + been outputted. +

    +
    -

    - Data 1: type - The type of UI message that would've - been outputted. Can be "say", "message", or "error". -

    -

    - Data 2: output - The UI message that would have - been outputted. -

    -
    diff --git a/website/source/docs/machine-readable/index.html.markdown b/website/source/docs/machine-readable/index.html.markdown index d26106b15..161bda001 100644 --- a/website/source/docs/machine-readable/index.html.markdown +++ b/website/source/docs/machine-readable/index.html.markdown @@ -1,32 +1,35 @@ --- -layout: "docs_machine_readable" -page_title: "Machine-Readable Reference" -description: |- - This is the reference for the various message categories for Packer machine-readable output. Please read that page if you're unfamiliar with the general format and usage for the machine-readable output. ---- +description: | + This is the reference for the various message categories for Packer + machine-readable output. Please read that page if you're unfamiliar with the + general format and usage for the machine-readable output. +layout: 'docs\_machine\_readable' +page_title: 'Machine-Readable Reference' +... # Machine-Readable Reference This is the reference for the various message categories for Packer -[machine-readable output](/docs/command-line/machine-readable.html). -Please read that page if you're unfamiliar with the general format and -usage for the machine-readable output. +[machine-readable output](/docs/command-line/machine-readable.html). Please read +that page if you're unfamiliar with the general format and usage for the +machine-readable output. -The layout of this reference is split into where the types come from. -There are a set of core types that are from Packer core itself. Then -there are types that come from various components of Packer such as the -builders, provisioners, and more. +The layout of this reference is split into where the types come from. There are +a set of core types that are from Packer core itself. Then there are types that +come from various components of Packer such as the builders, provisioners, and +more. Within each section, the format of the documentation is the following:
    -
    type-name (data-count)
    -
    -

    Description of the type.

    -

    - Data 1: name - Description. -

    -
    +
    type-name (data-count)
    +
    +

    Description of the type.

    +

    + Data 1: name - Description. +

    +
    +
    diff --git a/website/source/docs/other/core-configuration.html.markdown b/website/source/docs/other/core-configuration.html.markdown index 3727af061..db1f75ab7 100644 --- a/website/source/docs/other/core-configuration.html.markdown +++ b/website/source/docs/other/core-configuration.html.markdown @@ -1,25 +1,29 @@ --- -layout: "docs" -page_title: "Core Configuration" -description: |- - There are a few configuration settings that affect Packer globally by configuring the core of Packer. These settings all have reasonable defaults, so you generally don't have to worry about it until you want to tweak a configuration. If you're just getting started with Packer, don't worry about core configuration for now. ---- +description: | + There are a few configuration settings that affect Packer globally by + configuring the core of Packer. These settings all have reasonable defaults, so + you generally don't have to worry about it until you want to tweak a + configuration. If you're just getting started with Packer, don't worry about + core configuration for now. +layout: docs +page_title: Core Configuration +... # Core Configuration There are a few configuration settings that affect Packer globally by configuring the core of Packer. These settings all have reasonable defaults, so -you generally don't have to worry about it until you want to tweak -a configuration. If you're just getting started with Packer, don't worry -about core configuration for now. +you generally don't have to worry about it until you want to tweak a +configuration. If you're just getting started with Packer, don't worry about +core configuration for now. -The default location where Packer looks for this file depends on the -platform. For all non-Windows platforms, Packer looks for `$HOME/.packerconfig`. -For Windows, Packer looks for `%APPDATA%/packer.config`. If the file -doesn't exist, then Packer ignores it and just uses the default configuration. 
+The default location where Packer looks for this file depends on the platform. +For all non-Windows platforms, Packer looks for `$HOME/.packerconfig`. For +Windows, Packer looks for `%APPDATA%/packer.config`. If the file doesn't exist, +then Packer ignores it and just uses the default configuration. -The location of the core configuration file can be modified by setting -the `PACKER_CONFIG` environmental variable to be the path to another file. +The location of the core configuration file can be modified by setting the +`PACKER_CONFIG` environmental variable to be the path to another file. The format of the configuration file is basic JSON. @@ -28,12 +32,13 @@ The format of the configuration file is basic JSON. Below is the list of all available configuration parameters for the core configuration file. None of these are required, since all have sane defaults. -* `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and - maximum ports that Packer uses for communication with plugins, since - plugin communication happens over TCP connections on your local host. - By default these are 10,000 and 25,000, respectively. Be sure to set a fairly - wide range here, since Packer can easily use over 25 ports on a single run. +- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and + maximum ports that Packer uses for communication with plugins, since plugin + communication happens over TCP connections on your local host. By default + these are 10,000 and 25,000, respectively. Be sure to set a fairly wide range + here, since Packer can easily use over 25 ports on a single run. -* `builders`, `commands`, `post-processors`, and `provisioners` are objects that are used to - install plugins. The details of how exactly these are set is covered - in more detail in the [installing plugins documentation page](/docs/extend/plugins.html). 
+- `builders`, `commands`, `post-processors`, and `provisioners` are objects that + are used to install plugins. The details of how exactly these are set is + covered in more detail in the [installing plugins documentation + page](/docs/extend/plugins.html). diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index eabf56533..8c8012bc8 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -1,9 +1,12 @@ --- -layout: "docs" -page_title: "Debugging Packer" -description: |- - Packer strives to be stable and bug-free, but issues inevitably arise where certain things may not work entirely correctly, or may not appear to work correctly. In these cases, it is sometimes helpful to see more details about what Packer is actually doing. ---- +description: | + Packer strives to be stable and bug-free, but issues inevitably arise where + certain things may not work entirely correctly, or may not appear to work + correctly. In these cases, it is sometimes helpful to see more details about + what Packer is actually doing. +layout: docs +page_title: Debugging Packer +... # Debugging Packer Builds @@ -17,39 +20,40 @@ usually will stop between each step, waiting for keyboard input before continuing. This will allow you to inspect state and so on. In debug mode once the remote instance is instantiated, Packer will emit to the -current directory an emphemeral private ssh key as a .pem file. Using that you +current directory an emphemeral private ssh key as a .pem file. Using that you can `ssh -i ` into the remote build instance and see what is going on -for debugging. The emphemeral key will be deleted at the end of the packer run +for debugging. The emphemeral key will be deleted at the end of the packer run during cleanup. 
### Windows + As of Packer 0.8.1 the default WinRM communicator will emit the password for a Remote Desktop Connection into your instance. This happens following the several minute pause as the instance is booted. Note a .pem key is still created for securely transmitting the password. Packer automatically decrypts the password for you in debug mode. -## Debugging Packer +## Debugging Packer Issues occasionally arise where certain things may not work entirely correctly, or may not appear to work correctly. In these cases, it is sometimes helpful to see more details about what Packer is actually doing. Packer has detailed logs which can be enabled by setting the `PACKER_LOG` -environmental variable to any value like this `PACKER_LOG=1 packer build -`. This will cause detailed logs to appear on stderr. The logs -contain log messages from Packer as well as any plugins that are being used. Log -messages from plugins are prefixed by their application name. +environmental variable to any value like this +`PACKER_LOG=1 packer build `. This will cause detailed logs to +appear on stderr. The logs contain log messages from Packer as well as any +plugins that are being used. Log messages from plugins are prefixed by their +application name. -Note that because Packer is highly parallelized, log messages sometimes -appear out of order, especially with respect to plugins. In this case, -it is important to pay attention to the timestamp of the log messages -to determine order. +Note that because Packer is highly parallelized, log messages sometimes appear +out of order, especially with respect to plugins. In this case, it is important +to pay attention to the timestamp of the log messages to determine order. In addition to simply enabling the log, you can set `PACKER_LOG_PATH` in order -to force the log to always go to a specific file when logging is enabled. -Note that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in -order for any logging to be enabled. 
+to force the log to always go to a specific file when logging is enabled. Note +that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in order for +any logging to be enabled. -If you find a bug with Packer, please include the detailed log by using -a service such as [gist](http://gist.github.com). +If you find a bug with Packer, please include the detailed log by using a +service such as [gist](http://gist.github.com). diff --git a/website/source/docs/other/environmental-variables.html.markdown b/website/source/docs/other/environmental-variables.html.markdown index 318e25e25..7d455c708 100644 --- a/website/source/docs/other/environmental-variables.html.markdown +++ b/website/source/docs/other/environmental-variables.html.markdown @@ -1,34 +1,36 @@ --- -layout: "docs" -page_title: "Environmental Variables for Packer" -description: |- - Packer uses a variety of environmental variables. ---- +description: 'Packer uses a variety of environmental variables.' +layout: docs +page_title: Environmental Variables for Packer +... # Environmental Variables for Packer -Packer uses a variety of environmental variables. A listing and description of each can be found below: +Packer uses a variety of environmental variables. A listing and description of +each can be found below: -* `PACKER_CACHE_DIR` - The location of the packer cache. +- `PACKER_CACHE_DIR` - The location of the packer cache. -* `PACKER_CONFIG` - The location of the core configuration file. The format - of the configuration file is basic JSON. - See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_CONFIG` - The location of the core configuration file. The format of + the configuration file is basic JSON. See the [core configuration + page](/docs/other/core-configuration.html). -* `PACKER_LOG` - Setting this to any value will enable the logger. - See the [debugging page](/docs/other/debugging.html). +- `PACKER_LOG` - Setting this to any value will enable the logger. 
See the + [debugging page](/docs/other/debugging.html). -* `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must - be set for any logging to occur. See the [debugging page](/docs/other/debugging.html). +- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be + set for any logging to occur. See the [debugging + page](/docs/other/debugging.html). -* `PACKER_NO_COLOR` - Setting this to any value will disable color in the terminal. +- `PACKER_NO_COLOR` - Setting this to any value will disable color in + the terminal. -* `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for - communication with plugins, since plugin communication happens over - TCP connections on your local host. The default is 25,000. - See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for communication + with plugins, since plugin communication happens over TCP connections on your + local host. The default is 25,000. See the [core configuration + page](/docs/other/core-configuration.html). -* `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for - communication with plugins, since plugin communication happens - over TCP connections on your local host. The default is 10,000. - See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for communication + with plugins, since plugin communication happens over TCP connections on your + local host. The default is 10,000. See the [core configuration + page](/docs/other/core-configuration.html). 
diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 91b78e766..c038a119a 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -1,27 +1,38 @@ --- -layout: "docs" -page_title: "Atlas Post-Processor" -description: |- - The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version and distribute them in a simple way. ---- +description: | + The Atlas post-processor for Packer receives an artifact from a Packer build and + uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version + and distribute them in a simple way. +layout: docs +page_title: 'Atlas Post-Processor' +... # Atlas Post-Processor Type: `atlas` -The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves artifacts, allowing you to version and distribute them in a simple way. +The Atlas post-processor for Packer receives an artifact from a Packer build and +uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves +artifacts, allowing you to version and distribute them in a simple way. ## Workflow To take full advantage of Packer and Atlas, it's important to understand the -workflow for creating artifacts with Packer and storing them in Atlas using this post-processor. The goal of the Atlas post-processor is to streamline the distribution of public or private artifacts by hosting them in a central location in Atlas. +workflow for creating artifacts with Packer and storing them in Atlas using this +post-processor. The goal of the Atlas post-processor is to streamline the +distribution of public or private artifacts by hosting them in a central +location in Atlas. Here is an example workflow: -1. 
Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) -2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the version if the artifact already exists -3. The new version is ready and available to be used in deployments with a tool like [Terraform](https://terraform.io) - +1. Packer builds an AMI with the [Amazon AMI + builder](/docs/builders/amazon.html) +2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. + The `atlas` post-processor is configured with the name of the AMI, for example + `hashicorp/foobar`, to create the artifact in Atlas or update the version if + the artifact already exists +3. The new version is ready and available to be used in deployments with a tool + like [Terraform](https://terraform.io) ## Configuration @@ -29,32 +40,36 @@ The configuration allows you to specify and access the artifact in Atlas. ### Required: -* `token` (string) - Your access token for the Atlas API. - This can be generated on your [tokens page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can export your Atlas token as an environmental variable and remove it from the configuration. +- `token` (string) - Your access token for the Atlas API. This can be generated + on your [tokens page](https://atlas.hashicorp.com/settings/tokens). + Alternatively you can export your Atlas token as an environmental variable and + remove it from the configuration. -* `artifact` (string) - The shorthand tag for your artifact that maps to - Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must - have access to the organization, hashicorp in this example, in order to add an artifact to - the organization in Atlas. 
+- `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, + i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must + have access to the organization, hashicorp in this example, in order to add an + artifact to the organization in Atlas.
### Example Configuration -```javascript +``` {.javascript} { "variables": { "aws_access_key": "ACCESS_KEY_HERE", diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index e6a1237e9..716e4e866 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -1,9 +1,10 @@ --- -layout: "docs" -page_title: "compress Post-Processor" -description: |- - The Packer compress post-processor takes an artifact with files (such as from VMware or VirtualBox) and compresses the artifact into a single archive. ---- +description: | + The Packer compress post-processor takes an artifact with files (such as from + VMware or VirtualBox) and compresses the artifact into a single archive. +layout: docs +page_title: 'compress Post-Processor' +... # Compress Post-Processor @@ -16,49 +17,55 @@ VMware or VirtualBox) and compresses the artifact into a single archive. ### Required: -You must specify the output filename. The archive format is derived from the filename. +You must specify the output filename. The archive format is derived from the +filename. -* `output` (string) - The path to save the compressed archive. The archive - format is inferred from the filename. E.g. `.tar.gz` will be a gzipped - tarball. `.zip` will be a zip file. If the extension can't be detected packer - defaults to `.tar.gz` behavior but will not change the filename. +- `output` (string) - The path to save the compressed archive. The archive + format is inferred from the filename. E.g. `.tar.gz` will be a + gzipped tarball. `.zip` will be a zip file. If the extension can't be detected + packer defaults to `.tar.gz` behavior but will not change the filename. If you are executing multiple builders in parallel you should make sure - `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. + `output` is unique for each one. 
For example + `packer_{{.BuildName}}_{{.Provider}}.zip`. ### Optional: -If you want more control over how the archive is created you can specify the following settings: +If you want more control over how the archive is created you can specify the +following settings: -* `compression_level` (integer) - Specify the compression level, for algorithms +- `compression_level` (integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Defaults to `6` -* `keep_input_artifact` (boolean) - Keep source files; defaults to `false` +- `keep_input_artifact` (boolean) - Keep source files; defaults to `false` ### Supported Formats -Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to compress. +Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and +`.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to +compress. 
## Examples -Some minimal examples are shown below, showing only the post-processor configuration: +Some minimal examples are shown below, showing only the post-processor +configuration: -```json +``` {.json} { "type": "compress", "output": "archive.tar.lz4" } ``` -```json +``` {.json} { "type": "compress", "output": "archive.zip" } ``` -```json +``` {.json} { "type": "compress", "output": "archive.gz", diff --git a/website/source/docs/post-processors/docker-import.html.markdown b/website/source/docs/post-processors/docker-import.html.markdown index c2d7bba80..0c3855622 100644 --- a/website/source/docs/post-processors/docker-import.html.markdown +++ b/website/source/docs/post-processors/docker-import.html.markdown @@ -1,36 +1,38 @@ --- -layout: "docs" -page_title: "docker-import Post-Processor" -description: |- - The Packer Docker import post-processor takes an artifact from the docker builder and imports it with Docker locally. This allows you to apply a repository and tag to the image and lets you use the other Docker post-processors such as docker-push to push the image to a registry. ---- +description: | + The Packer Docker import post-processor takes an artifact from the docker + builder and imports it with Docker locally. This allows you to apply a + repository and tag to the image and lets you use the other Docker + post-processors such as docker-push to push the image to a registry. +layout: docs +page_title: 'docker-import Post-Processor' +... # Docker Import Post-Processor Type: `docker-import` -The Packer Docker import post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) and imports it with Docker -locally. This allows you to apply a repository and tag to the image -and lets you use the other Docker post-processors such as -[docker-push](/docs/post-processors/docker-push.html) to push the image -to a registry. 
+The Packer Docker import post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) and imports it with Docker locally. This +allows you to apply a repository and tag to the image and lets you use the other +Docker post-processors such as +[docker-push](/docs/post-processors/docker-push.html) to push the image to a +registry. ## Configuration -The configuration for this post-processor is extremely simple. At least -a repository is required. +The configuration for this post-processor is extremely simple. At least a +repository is required. -* `repository` (string) - The repository of the imported image. +- `repository` (string) - The repository of the imported image. -* `tag` (string) - The tag for the imported image. By default this is not - set. +- `tag` (string) - The tag for the imported image. By default this is not set. ## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-import", "repository": "mitchellh/packer", @@ -38,9 +40,9 @@ An example is shown below, showing only the post-processor configuration: } ``` -This example would take the image created by the Docker builder -and import it into the local Docker process with a name of `mitchellh/packer:0.7`. +This example would take the image created by the Docker builder and import it +into the local Docker process with a name of `mitchellh/packer:0.7`. Following this, you can use the -[docker-push](/docs/post-processors/docker-push.html) -post-processor to push it to a registry, if you want. +[docker-push](/docs/post-processors/docker-push.html) post-processor to push it +to a registry, if you want. 
diff --git a/website/source/docs/post-processors/docker-push.html.markdown b/website/source/docs/post-processors/docker-push.html.markdown index 2f7ae3e92..72793b735 100644 --- a/website/source/docs/post-processors/docker-push.html.markdown +++ b/website/source/docs/post-processors/docker-push.html.markdown @@ -1,38 +1,38 @@ --- -layout: "docs" -page_title: "Docker Push Post-Processor" -description: |- - The Packer Docker push post-processor takes an artifact from the docker-import post-processor and pushes it to a Docker registry. ---- +description: | + The Packer Docker push post-processor takes an artifact from the docker-import + post-processor and pushes it to a Docker registry. +layout: docs +page_title: 'Docker Push Post-Processor' +... # Docker Push Post-Processor Type: `docker-push` The Packer Docker push post-processor takes an artifact from the -[docker-import](/docs/post-processors/docker-import.html) post-processor -and pushes it to a Docker registry. +[docker-import](/docs/post-processors/docker-import.html) post-processor and +pushes it to a Docker registry. ## Configuration This post-processor has only optional configuration: -* `login` (boolean) - Defaults to false. If true, the post-processor will - login prior to pushing. +- `login` (boolean) - Defaults to false. If true, the post-processor will login + prior to pushing. -* `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use to authenticate to login. -* `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use to authenticate to login. -* `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use to authenticate to login. -* `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to login to. 
--> **Note:** If you login using the credentials above, the -post-processor will automatically log you out afterwards (just the server -specified). +-> **Note:** If you login using the credentials above, the post-processor +will automatically log you out afterwards (just the server specified). ## Example -For an example of using docker-push, see the section on using -generated artifacts from the [docker builder](/docs/builders/docker.html). +For an example of using docker-push, see the section on using generated +artifacts from the [docker builder](/docs/builders/docker.html). diff --git a/website/source/docs/post-processors/docker-save.html.markdown b/website/source/docs/post-processors/docker-save.html.markdown index ca03dfcf6..8f758755c 100644 --- a/website/source/docs/post-processors/docker-save.html.markdown +++ b/website/source/docs/post-processors/docker-save.html.markdown @@ -1,35 +1,37 @@ --- -layout: "docs" -page_title: "docker-save Post-Processor" -description: |- - The Packer Docker Save post-processor takes an artifact from the docker builder that was committed and saves it to a file. This is similar to exporting the Docker image directly from the builder, except that it preserves the hierarchy of images and metadata. ---- +description: | + The Packer Docker Save post-processor takes an artifact from the docker builder + that was committed and saves it to a file. This is similar to exporting the + Docker image directly from the builder, except that it preserves the hierarchy + of images and metadata. +layout: docs +page_title: 'docker-save Post-Processor' +... # Docker Save Post-Processor Type: `docker-save` -The Packer Docker Save post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) that was committed -and saves it to a file. This is similar to exporting the Docker image -directly from the builder, except that it preserves the hierarchy of -images and metadata. 
+The Packer Docker Save post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) that was committed and saves it to a file. +This is similar to exporting the Docker image directly from the builder, except +that it preserves the hierarchy of images and metadata. -We understand the terminology can be a bit confusing, but we've -adopted the terminology from Docker, so if you're familiar with that, then -you'll be familiar with this and vice versa. +We understand the terminology can be a bit confusing, but we've adopted the +terminology from Docker, so if you're familiar with that, then you'll be +familiar with this and vice versa. ## Configuration The configuration for this post-processor is extremely simple. -* `path` (string) - The path to save the image. +- `path` (string) - The path to save the image. ## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-save", "path": "foo.tar" diff --git a/website/source/docs/post-processors/docker-tag.html.markdown b/website/source/docs/post-processors/docker-tag.html.markdown index d3925d1fa..42c480676 100644 --- a/website/source/docs/post-processors/docker-tag.html.markdown +++ b/website/source/docs/post-processors/docker-tag.html.markdown @@ -1,43 +1,44 @@ --- -layout: "docs" -page_title: "docker-tag Post-Processor" -description: |- - The Packer Docker Tag post-processor takes an artifact from the docker builder that was committed and tags it into a repository. This allows you to use the other Docker post-processors such as docker-push to push the image to a registry. ---- +description: | + The Packer Docker Tag post-processor takes an artifact from the docker builder + that was committed and tags it into a repository. This allows you to use the + other Docker post-processors such as docker-push to push the image to a + registry. +layout: docs +page_title: 'docker-tag Post-Processor' +... 
# Docker Tag Post-Processor Type: `docker-tag` -The Packer Docker Tag post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) that was committed -and tags it into a repository. This allows you to use the other -Docker post-processors such as -[docker-push](/docs/post-processors/docker-push.html) to push the image -to a registry. +The Packer Docker Tag post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) that was committed and tags it into a +repository. This allows you to use the other Docker post-processors such as +[docker-push](/docs/post-processors/docker-push.html) to push the image to a +registry. -This is very similar to the [docker-import](/docs/post-processors/docker-import.html) -post-processor except that this works with committed resources, rather -than exported. +This is very similar to the +[docker-import](/docs/post-processors/docker-import.html) post-processor except +that this works with committed resources, rather than exported. ## Configuration -The configuration for this post-processor is extremely simple. At least -a repository is required. +The configuration for this post-processor is extremely simple. At least a +repository is required. -* `repository` (string) - The repository of the image. +- `repository` (string) - The repository of the image. -* `tag` (string) - The tag for the image. By default this is not - set. +- `tag` (string) - The tag for the image. By default this is not set. -* `force` (boolean) - If true, this post-processor forcibly tag the image - even if tag name is collided. Default to `false`. +- `force` (boolean) - If true, this post-processor forcibly tags the image even + if the tag name collides. Defaults to `false`. 
## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-tag", "repository": "mitchellh/packer", @@ -45,9 +46,9 @@ An example is shown below, showing only the post-processor configuration: } ``` -This example would take the image created by the Docker builder -and tag it into the local Docker process with a name of `mitchellh/packer:0.7`. +This example would take the image created by the Docker builder and tag it into +the local Docker process with a name of `mitchellh/packer:0.7`. Following this, you can use the -[docker-push](/docs/post-processors/docker-push.html) -post-processor to push it to a registry, if you want. +[docker-push](/docs/post-processors/docker-push.html) post-processor to push it +to a registry, if you want. diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 451ed087b..e049552da 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -1,81 +1,88 @@ --- -layout: "docs" -page_title: "Vagrant Cloud Post-Processor" -description: |- - The Packer Vagrant Cloud post-processor receives a Vagrant box from the `vagrant` post-processor and pushes it to Vagrant Cloud. Vagrant Cloud hosts and serves boxes to Vagrant, allowing you to version and distribute boxes to an organization in a simple way. ---- +description: | + The Packer Vagrant Cloud post-processor receives a Vagrant box from the + `vagrant` post-processor and pushes it to Vagrant Cloud. Vagrant Cloud hosts and + serves boxes to Vagrant, allowing you to version and distribute boxes to an + organization in a simple way. +layout: docs +page_title: 'Vagrant Cloud Post-Processor' +... # Vagrant Cloud Post-Processor -~> Vagrant Cloud has been superseded by Atlas. 
Please use the [Atlas post-processor](/docs/post-processors/atlas.html) instead. Learn more about [Atlas](https://atlas.hashicorp.com/). +\~> Vagrant Cloud has been superseded by Atlas. Please use the [Atlas +post-processor](/docs/post-processors/atlas.html) instead. Learn more about +[Atlas](https://atlas.hashicorp.com/). Type: `vagrant-cloud` -The Packer Vagrant Cloud post-processor receives a Vagrant box from the `vagrant` -post-processor and pushes it to Vagrant Cloud. [Vagrant Cloud](https://vagrantcloud.com) -hosts and serves boxes to Vagrant, allowing you to version and distribute -boxes to an organization in a simple way. +The Packer Vagrant Cloud post-processor receives a Vagrant box from the +`vagrant` post-processor and pushes it to Vagrant Cloud. [Vagrant +Cloud](https://vagrantcloud.com) hosts and serves boxes to Vagrant, allowing you +to version and distribute boxes to an organization in a simple way. -You'll need to be familiar with Vagrant Cloud, have an upgraded account -to enable box hosting, and be distributing your box via the [shorthand name](http://docs.vagrantup.com/v2/cli/box.html) -configuration. +You'll need to be familiar with Vagrant Cloud, have an upgraded account to +enable box hosting, and be distributing your box via the [shorthand +name](http://docs.vagrantup.com/v2/cli/box.html) configuration. ## Workflow It's important to understand the workflow that using this post-processor enforces in order to take full advantage of Vagrant and Vagrant Cloud. -The use of this processor assume that you currently distribute, or plan -to distribute, boxes via Vagrant Cloud. It also assumes you create Vagrant -Boxes and deliver them to your team in some fashion. +The use of this processor assumes that you currently distribute, or plan to +distribute, boxes via Vagrant Cloud. It also assumes you create Vagrant Boxes +and deliver them to your team in some fashion. Here is an example workflow: -1. 
You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box `hashicorp/foobar` on Vagrant Cloud -via the `box_tag` configuration -2. The post-processor receives the box from the `vagrant` post-processor -3. It then creates the configured version, or verifies the existence of it, on Vagrant Cloud -4. A provider matching the name of the Vagrant provider is then created -5. The box is uploaded to Vagrant Cloud -6. The upload is verified -7. The version is released and available to users of the box - +1. You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box + `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on + Vagrant Cloud +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. The version is released and available to users of the box ## Configuration -The configuration allows you to specify the target box that you have -access to on Vagrant Cloud, as well as authentication and version information. +The configuration allows you to specify the target box that you have access to +on Vagrant Cloud, as well as authentication and version information. ### Required: -* `access_token` (string) - Your access token for the Vagrant Cloud API. - This can be generated on your [tokens page](https://vagrantcloud.com/account/tokens). +- `access_token` (string) - Your access token for the Vagrant Cloud API. This + can be generated on your [tokens + page](https://vagrantcloud.com/account/tokens). 
-* `box_tag` (string) - The shorthand tag for your box that maps to - Vagrant Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` - -* `version` (string) - The version number, typically incrementing a previous version. - The version string is validated based on [Semantic Versioning](http://semver.org/). The string must match - a pattern that could be semver, and doesn't validate that the version comes after - your previous versions. +- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant + Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` +- `version` (string) - The version number, typically incrementing a + previous version. The version string is validated based on [Semantic + Versioning](http://semver.org/). The string must match a pattern that could be + semver, and doesn't validate that the version comes after your + previous versions. ### Optional: -* `no_release` (string) - If set to true, does not release the version -on Vagrant Cloud, making it active. You can manually release the version -via the API or Web UI. Defaults to false. +- `no_release` (string) - If set to true, does not release the version on + Vagrant Cloud, making it active. You can manually release the version via the + API or Web UI. Defaults to false. -* `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This -is useful if you're using Vagrant Private Cloud in your own network. Defaults -to `https://vagrantcloud.com/api/v1` +- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This + is useful if you're using Vagrant Private Cloud in your own network. 
Defaults + to `https://vagrantcloud.com/api/v1` -* `version_description` (string) - Optionally markdown text used as a full-length - and in-depth description of the version, typically for denoting changes introduced +- `version_description` (string) - Optionally markdown text used as a + full-length and in-depth description of the version, typically for denoting + changes introduced -* `box_download_url` (string) - Optional URL for a self-hosted box. If this is set -the box will not be uploaded to the Vagrant Cloud. +- `box_download_url` (string) - Optional URL for a self-hosted box. If this is + set the box will not be uploaded to the Vagrant Cloud. ## Use with Vagrant Post-Processor @@ -84,7 +91,7 @@ An example configuration is below. Note the use of a doubly-nested array, which ensures that the Vagrant Cloud post-processor is run after the Vagrant post-processor. -```javascript +``` {.javascript} { "variables": { "version": "", diff --git a/website/source/docs/post-processors/vagrant.html.markdown b/website/source/docs/post-processors/vagrant.html.markdown index 7ed19d665..da1b8daa9 100644 --- a/website/source/docs/post-processors/vagrant.html.markdown +++ b/website/source/docs/post-processors/vagrant.html.markdown @@ -1,91 +1,90 @@ --- -layout: "docs" -page_title: "Vagrant Post-Processor" -description: |- - The Packer Vagrant post-processor takes a build and converts the artifact into a valid Vagrant box, if it can. This lets you use Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact how the official boxes distributed by Vagrant are created. ---- +description: | + The Packer Vagrant post-processor takes a build and converts the artifact into a + valid Vagrant box, if it can. This lets you use Packer to automatically create + arbitrarily complex Vagrant boxes, and is in fact how the official boxes + distributed by Vagrant are created. +layout: docs +page_title: 'Vagrant Post-Processor' +... 
# Vagrant Post-Processor Type: `vagrant` -The Packer Vagrant post-processor takes a build and converts the artifact -into a valid [Vagrant](http://www.vagrantup.com) box, if it can. -This lets you use Packer to automatically create arbitrarily complex -Vagrant boxes, and is in fact how the official boxes distributed by -Vagrant are created. +The Packer Vagrant post-processor takes a build and converts the artifact into a +valid [Vagrant](http://www.vagrantup.com) box, if it can. This lets you use +Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact +how the official boxes distributed by Vagrant are created. -If you've never used a post-processor before, please read the -documentation on [using post-processors](/docs/templates/post-processors.html) -in templates. This knowledge will be expected for the remainder of -this document. +If you've never used a post-processor before, please read the documentation on +[using post-processors](/docs/templates/post-processors.html) in templates. This +knowledge will be expected for the remainder of this document. -Because Vagrant boxes are [provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), -the Vagrant post-processor is hardcoded to understand how to convert -the artifacts of certain builders into proper boxes for their -respective providers. +Because Vagrant boxes are +[provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), the Vagrant +post-processor is hardcoded to understand how to convert the artifacts of +certain builders into proper boxes for their respective providers. Currently, the Vagrant post-processor can create boxes for the following providers. -* AWS -* DigitalOcean -* Hyper-V -* Parallels -* QEMU -* VirtualBox -* VMware +- AWS +- DigitalOcean +- Hyper-V +- Parallels +- QEMU +- VirtualBox +- VMware --> **Support for additional providers** is planned. 
If the -Vagrant post-processor doesn't support creating boxes for a provider you -care about, please help by contributing to Packer and adding support for it. +-> **Support for additional providers** is planned. If the Vagrant +post-processor doesn't support creating boxes for a provider you care about, +please help by contributing to Packer and adding support for it. ## Configuration The simplest way to use the post-processor is to just enable it. No -configuration is required by default. This will mostly do what you expect -and will build functioning boxes for many of the built-in builders of -Packer. +configuration is required by default. This will mostly do what you expect and +will build functioning boxes for many of the built-in builders of Packer. -However, if you want to configure things a bit more, the post-processor -does expose some configuration options. The available options are listed -below, with more details about certain options in following sections. +However, if you want to configure things a bit more, the post-processor does +expose some configuration options. The available options are listed below, with +more details about certain options in following sections. -* `compression_level` (integer) - An integer representing the - compression level to use when creating the Vagrant box. Valid - values range from 0 to 9, with 0 being no compression and 9 being - the best compression. By default, compression is enabled at level 6. +- `compression_level` (integer) - An integer representing the compression level + to use when creating the Vagrant box. Valid values range from 0 to 9, with 0 + being no compression and 9 being the best compression. By default, compression + is enabled at level 6. -* `include` (array of strings) - Paths to files to include in the - Vagrant box. These files will each be copied into the top level directory - of the Vagrant box (regardless of their paths). They can then be used - from the Vagrantfile. 
+- `include` (array of strings) - Paths to files to include in the Vagrant box. + These files will each be copied into the top level directory of the Vagrant + box (regardless of their paths). They can then be used from the Vagrantfile. -* `keep_input_artifact` (boolean) - If set to true, do not delete the +- `keep_input_artifact` (boolean) - If set to true, do not delete the `output_directory` on a successful build. Defaults to false. -* `output` (string) - The full path to the box file that will be created - by this post-processor. This is a - [configuration template](/docs/templates/configuration-templates.html). - The variable `Provider` is replaced by the Vagrant provider the box is for. - The variable `ArtifactId` is replaced by the ID of the input artifact. - The variable `BuildName` is replaced with the name of the build. - By default, the value of this config is `packer_{{.BuildName}}_{{.Provider}}.box`. +- `output` (string) - The full path to the box file that will be created by + this post-processor. This is a [configuration + template](/docs/templates/configuration-templates.html). The variable + `Provider` is replaced by the Vagrant provider the box is for. The variable + `ArtifactId` is replaced by the ID of the input artifact. The variable + `BuildName` is replaced with the name of the build. By default, the value of + this config is `packer_{{.BuildName}}_{{.Provider}}.box`. -* `vagrantfile_template` (string) - Path to a template to use for the +- `vagrantfile_template` (string) - Path to a template to use for the Vagrantfile that is packaged with the box. ## Provider-Specific Overrides -If you have a Packer template with multiple builder types within it, -you may want to configure the box creation for each type a little differently. -For example, the contents of the Vagrantfile for a Vagrant box for AWS might -be different from the contents of the Vagrantfile you want for VMware. -The post-processor lets you do this. 
+If you have a Packer template with multiple builder types within it, you may +want to configure the box creation for each type a little differently. For +example, the contents of the Vagrantfile for a Vagrant box for AWS might be +different from the contents of the Vagrantfile you want for VMware. The +post-processor lets you do this. Specify overrides within the `override` configuration by provider name: -```javascript +``` {.javascript} { "type": "vagrant", "compression_level": 1, @@ -97,18 +96,18 @@ Specify overrides within the `override` configuration by provider name: } ``` -In the example above, the compression level will be set to 1 except for -VMware, where it will be set to 0. +In the example above, the compression level will be set to 1 except for VMware, +where it will be set to 0. -The available provider names are: `aws`, `digitalocean`, `virtualbox`, -`vmware`, and `parallels`. +The available provider names are: `aws`, `digitalocean`, `virtualbox`, `vmware`, +and `parallels`. ## Input Artifacts -By default, Packer will delete the original input artifact, assuming -you only want the final Vagrant box as the result. If you wish to keep the -input artifact (the raw virtual machine, for example), then you must -configure Packer to keep it. +By default, Packer will delete the original input artifact, assuming you only +want the final Vagrant box as the result. If you wish to keep the input artifact +(the raw virtual machine, for example), then you must configure Packer to keep +it. -Please see the [documentation on input artifacts](/docs/templates/post-processors.html#toc_2) -for more information. +Please see the [documentation on input +artifacts](/docs/templates/post-processors.html#toc_2) for more information. 
diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown index ca3f3f54a..f0fd9588e 100644 --- a/website/source/docs/post-processors/vsphere.html.markdown +++ b/website/source/docs/post-processors/vsphere.html.markdown @@ -1,16 +1,17 @@ --- -layout: "docs" -page_title: "vSphere Post-Processor" -description: |- - The Packer vSphere post-processor takes an artifact from the VMware builder and uploads it to a vSphere endpoint. ---- +description: | + The Packer vSphere post-processor takes an artifact from the VMware builder and + uploads it to a vSphere endpoint. +layout: docs +page_title: 'vSphere Post-Processor' +... # vSphere Post-Processor Type: `vsphere` -The Packer vSphere post-processor takes an artifact from the VMware builder -and uploads it to a vSphere endpoint. +The Packer vSphere post-processor takes an artifact from the VMware builder and +uploads it to a vSphere endpoint. ## Configuration @@ -20,37 +21,35 @@ each category, the available configuration keys are alphabetized. Required: -* `cluster` (string) - The cluster to upload the VM to. +- `cluster` (string) - The cluster to upload the VM to. -* `datacenter` (string) - The name of the datacenter within vSphere to - add the VM to. +- `datacenter` (string) - The name of the datacenter within vSphere to add the + VM to. -* `datastore` (string) - The name of the datastore to store this VM. - This is _not required_ if `resource_pool` is specified. +- `datastore` (string) - The name of the datastore to store this VM. This is + *not required* if `resource_pool` is specified. -* `host` (string) - The vSphere host that will be contacted to perform - the VM upload. +- `host` (string) - The vSphere host that will be contacted to perform the + VM upload. -* `password` (string) - Password to use to authenticate to the vSphere - endpoint. +- `password` (string) - Password to use to authenticate to the vSphere endpoint. 
-* `resource_pool` (string) - The resource pool to upload the VM to. - This is _not required_. +- `resource_pool` (string) - The resource pool to upload the VM to. This is *not + required*. -* `username` (string) - The username to use to authenticate to the vSphere - endpoint. +- `username` (string) - The username to use to authenticate to the + vSphere endpoint. -* `vm_name` (string) - The name of the VM once it is uploaded. +- `vm_name` (string) - The name of the VM once it is uploaded. Optional: -* `disk_mode` (string) - Target disk format. See `ovftool` manual for +- `disk_mode` (string) - Target disk format. See `ovftool` manual for available options. By default, "thick" will be used. -* `insecure` (boolean) - Whether or not the connection to vSphere can be done +- `insecure` (boolean) - Whether or not the connection to vSphere can be done over an insecure connection. By default this is false. -* `vm_folder` (string) - The folder within the datastore to store the VM. +- `vm_folder` (string) - The folder within the datastore to store the VM. -* `vm_network` (string) - The name of the VM network this VM will be - added to. +- `vm_network` (string) - The name of the VM network this VM will be added to. diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index a2550b7bd..5682043c9 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -1,23 +1,28 @@ --- -layout: "docs" -page_title: "Ansible (Local) Provisioner" -description: |- - The `ansible-local` Packer provisioner configures Ansible to run on the machine by Packer from local Playbook and Role files. Playbooks and Roles can be uploaded from your local machine to the remote machine. Ansible is run in local mode via the `ansible-playbook` command. 
---- +description: | + The `ansible-local` Packer provisioner configures Ansible to run on the machine + by Packer from local Playbook and Role files. Playbooks and Roles can be + uploaded from your local machine to the remote machine. Ansible is run in local + mode via the `ansible-playbook` command. +layout: docs +page_title: 'Ansible (Local) Provisioner' +... # Ansible Local Provisioner Type: `ansible-local` -The `ansible-local` Packer provisioner configures Ansible to run on the machine by -Packer from local Playbook and Role files. Playbooks and Roles can be uploaded -from your local machine to the remote machine. Ansible is run in [local mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the `ansible-playbook` command. +The `ansible-local` Packer provisioner configures Ansible to run on the machine +by Packer from local Playbook and Role files. Playbooks and Roles can be +uploaded from your local machine to the remote machine. Ansible is run in [local +mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the +`ansible-playbook` command. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "ansible-local", "playbook_file": "local.yml" @@ -30,39 +35,40 @@ The reference of available configuration options is listed below. Required: -* `playbook_file` (string) - The playbook file to be executed by ansible. - This file must exist on your local system and will be uploaded to the +- `playbook_file` (string) - The playbook file to be executed by ansible. This + file must exist on your local system and will be uploaded to the remote machine. Optional: -* `command` (string) - The command to invoke ansible. Defaults to "ansible-playbook". +- `command` (string) - The command to invoke ansible. Defaults + to "ansible-playbook". -* `extra_arguments` (array of strings) - An array of extra arguments to pass to the - ansible command. By default, this is empty. 
+- `extra_arguments` (array of strings) - An array of extra arguments to pass to
+  the ansible command. By default, this is empty.
 
-* `inventory_groups` (string) - A comma-separated list of groups to which
-  packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2`
-  will generate an Ansible inventory like:
+- `inventory_groups` (string) - A comma-separated list of groups to which packer
+  will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` will
+  generate an Ansible inventory like:
 
-    ```text
+    ``` {.text}
     [my_group_1]
     127.0.0.1
 
     [my_group_2]
     127.0.0.1
     ```
 
-* `inventory_file` (string) - The inventory file to be used by ansible.
-  This file must exist on your local system and will be uploaded to the
+- `inventory_file` (string) - The inventory file to be used by ansible. This
+  file must exist on your local system and will be uploaded to the
   remote machine.
 
-  When using an inventory file, it's also required to `--limit` the hosts to
-  the specified host you're buiding. The `--limit` argument can be provided in
-  the `extra_arguments` option.
+  When using an inventory file, it's also required to `--limit` the hosts to the
+  specified host you're building. The `--limit` argument can be provided in the
+  `extra_arguments` option.
 
   An example inventory file may look like:
 
-    ```text
+    ``` {.text}
     [chi-dbservers]
     db-01 ansible_connection=local
     db-02 ansible_connection=local
@@ -82,29 +88,30 @@ Optional:
     chi-appservers
     ```
 
-* `playbook_dir` (string) - a path to the complete ansible directory
-  structure on your local system to be copied to the remote machine
-  as the `staging_directory` before all other files and directories.
+- `playbook_dir` (string) - a path to the complete ansible directory structure
+  on your local system to be copied to the remote machine as the
+  `staging_directory` before all other files and directories.
-* `playbook_paths` (array of strings) - An array of paths to playbook files on +- `playbook_paths` (array of strings) - An array of paths to playbook files on your local system. These will be uploaded to the remote machine under `staging_directory`/playbooks. By default, this is empty. -* `group_vars` (string) - a path to the directory containing ansible - group variables on your local system to be copied to the - remote machine. By default, this is empty. +- `group_vars` (string) - a path to the directory containing ansible group + variables on your local system to be copied to the remote machine. By default, + this is empty. -* `host_vars` (string) - a path to the directory containing ansible - host variables on your local system to be copied to the - remote machine. By default, this is empty. +- `host_vars` (string) - a path to the directory containing ansible host + variables on your local system to be copied to the remote machine. By default, + this is empty. -* `role_paths` (array of strings) - An array of paths to role directories on +- `role_paths` (array of strings) - An array of paths to role directories on your local system. These will be uploaded to the remote machine under `staging_directory`/roles. By default, this is empty. -* `staging_directory` (string) - The directory where all the configuration of - Ansible by Packer will be placed. By default this is "/tmp/packer-provisioner-ansible-local". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner prior - to this to configure it properly. +- `staging_directory` (string) - The directory where all the configuration of + Ansible by Packer will be placed. By default this + is "/tmp/packer-provisioner-ansible-local". 
This directory doesn't need to + exist but must have proper permissions so that the SSH user that Packer uses + is able to create directories and write into this folder. If the permissions + are not correct, use a shell provisioner prior to this to configure + it properly. diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 3e56eecb2..81d097b7e 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -1,115 +1,120 @@ --- -layout: "docs" -page_title: "Chef-Client Provisioner" -description: |- - The Chef Client Packer provisioner installs and configures software on machines built by Packer using chef-client. Packer configures a Chef client to talk to a remote Chef Server to provision the machine. ---- +description: | + The Chef Client Packer provisioner installs and configures software on machines + built by Packer using chef-client. Packer configures a Chef client to talk to a + remote Chef Server to provision the machine. +layout: docs +page_title: 'Chef-Client Provisioner' +... # Chef Client Provisioner Type: `chef-client` -The Chef Client Packer provisioner installs and configures software on machines built -by Packer using [chef-client](http://docs.opscode.com/chef_client.html). -Packer configures a Chef client to talk to a remote Chef Server to -provision the machine. +The Chef Client Packer provisioner installs and configures software on machines +built by Packer using [chef-client](http://docs.opscode.com/chef_client.html). +Packer configures a Chef client to talk to a remote Chef Server to provision the +machine. The provisioner will even install Chef onto your machine if it isn't already installed, using the official Chef installers provided by Opscode. ## Basic Example -The example below is fully functional. It will install Chef onto the -remote machine and run Chef client. 
+The example below is fully functional. It will install Chef onto the remote +machine and run Chef client. -```javascript +``` {.javascript} { "type": "chef-client", "server_url": "https://mychefserver.com/" } ``` -Note: to properly clean up the Chef node and client the machine on which -packer is running must have knife on the path and configured globally, -i.e, ~/.chef/knife.rb must be present and configured for the target chef server +Note: to properly clean up the Chef node and client the machine on which packer +is running must have knife on the path and configured globally, i.e, +\~/.chef/knife.rb must be present and configured for the target chef server ## Configuration Reference The reference of available configuration options is listed below. No configuration is actually required. -* `chef_environment` (string) - The name of the chef_environment sent to the +- `chef_environment` (string) - The name of the chef\_environment sent to the Chef server. By default this is empty and will not use an environment. -* `config_template` (string) - Path to a template that will be used for - the Chef configuration file. By default Packer only sets configuration - it needs to match the settings set in the provisioner configuration. If - you need to set configurations that the Packer provisioner doesn't support, - then you should use a custom configuration template. See the dedicated - "Chef Configuration" section below for more details. +- `config_template` (string) - Path to a template that will be used for the Chef + configuration file. By default Packer only sets configuration it needs to + match the settings set in the provisioner configuration. If you need to set + configurations that the Packer provisioner doesn't support, then you should + use a custom configuration template. See the dedicated "Chef Configuration" + section below for more details. -* `execute_command` (string) - The command used to execute Chef. 
This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `install_command` (string) - The command used to install Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `json` (object) - An arbitrary mapping of JSON that will be available as - node attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as node + attributes while running Chef. -* `node_name` (string) - The name of the node to register with the Chef - Server. This is optional and by default is packer-{{uuid}}. +- `node_name` (string) - The name of the node to register with the Chef Server. + This is optional and by default is packer-{{uuid}}. -* `prevent_sudo` (boolean) - By default, the configured commands that are +- `prevent_sudo` (boolean) - By default, the configured commands that are executed to install and run Chef are executed with `sudo`. If this is true, then the sudo will be omitted. -* `run_list` (array of strings) - The [run list](http://docs.opscode.com/essentials_node_object_run_lists.html) - for Chef. By default this is empty, and will use the run list sent - down by the Chef Server. +- `run_list` (array of strings) - The [run + list](http://docs.opscode.com/essentials_node_object_run_lists.html) for Chef. + By default this is empty, and will use the run list sent down by the + Chef Server. 
-* `server_url` (string) - The URL to the Chef server. This is required. +- `server_url` (string) - The URL to the Chef server. This is required. -* `skip_clean_client` (boolean) - If true, Packer won't remove the client - from the Chef server after it is done running. By default, this is false. +- `skip_clean_client` (boolean) - If true, Packer won't remove the client from + the Chef server after it is done running. By default, this is false. -* `skip_clean_node` (boolean) - If true, Packer won't remove the node - from the Chef server after it is done running. By default, this is false. +- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the + Chef server after it is done running. By default, this is false. -* `skip_install` (boolean) - If true, Chef will not automatically be installed +- `skip_install` (boolean) - If true, Chef will not automatically be installed on the machine using the Opscode omnibus installers. -* `staging_directory` (string) - This is the directory where all the configuration - of Chef by Packer will be placed. By default this is "/tmp/packer-chef-client". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-client". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -* `client_key` (string) - Path to client key. If not set, this defaults to a file - named client.pem in `staging_directory`. 
+- `client_key` (string) - Path to client key. If not set, this defaults to a + file named client.pem in `staging_directory`. -* `validation_client_name` (string) - Name of the validation client. If - not set, this won't be set in the configuration and the default that Chef - uses will be used. +- `validation_client_name` (string) - Name of the validation client. If not set, + this won't be set in the configuration and the default that Chef uses will + be used. -* `validation_key_path` (string) - Path to the validation key for communicating - with the Chef Server. This will be uploaded to the remote machine. If this - is NOT set, then it is your responsibility via other means (shell provisioner, - etc.) to get a validation key to where Chef expects it. +- `validation_key_path` (string) - Path to the validation key for communicating + with the Chef Server. This will be uploaded to the remote machine. If this is + NOT set, then it is your responsibility via other means (shell + provisioner, etc.) to get a validation key to where Chef expects it. ## Chef Configuration -By default, Packer uses a simple Chef configuration file in order to set -the options specified for the provisioner. But Chef is a complex tool that -supports many configuration options. Packer allows you to specify a custom -configuration template if you'd like to set custom configurations. +By default, Packer uses a simple Chef configuration file in order to set the +options specified for the provisioner. But Chef is a complex tool that supports +many configuration options. Packer allows you to specify a custom configuration +template if you'd like to set custom configurations. 
The default value for the configuration template is: -```liquid +``` {.liquid} log_level :info log_location STDOUT chef_server_url "{{.ServerUrl}}" @@ -126,42 +131,42 @@ node_name "{{.NodeName}}" {{end}} ``` -This template is a [configuration template](/docs/templates/configuration-templates.html) -and has a set of variables available to use: +This template is a [configuration +template](/docs/templates/configuration-templates.html) and has a set of +variables available to use: -* `NodeName` - The node name set in the configuration. -* `ServerUrl` - The URL of the Chef Server set in the configuration. -* `ValidationKeyPath` - Path to the validation key, if it is set. +- `NodeName` - The node name set in the configuration. +- `ServerUrl` - The URL of the Chef Server set in the configuration. +- `ValidationKeyPath` - Path to the validation key, if it is set. ## Execute Command -By default, Packer uses the following command (broken across multiple lines -for readability) to execute Chef: +By default, Packer uses the following command (broken across multiple lines for +readability) to execute Chef: -```liquid +``` {.liquid} {{if .Sudo}}sudo {{end}}chef-client \ --no-color \ -c {{.ConfigPath}} \ -j {{.JsonPath}} ``` -This command can be customized using the `execute_command` configuration. -As you can see from the default value above, the value of this configuration -can contain various template variables, defined below: +This command can be customized using the `execute_command` configuration. As you +can see from the default value above, the value of this configuration can +contain various template variables, defined below: -* `ConfigPath` - The path to the Chef configuration file. - file. -* `JsonPath` - The path to the JSON attributes file for the node. -* `Sudo` - A boolean of whether to `sudo` the command or not, depending on - the value of the `prevent_sudo` configuration. +- `ConfigPath` - The path to the Chef configuration file. file. 
+- `JsonPath` - The path to the JSON attributes file for the node. +- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the + value of the `prevent_sudo` configuration. ## Install Command -By default, Packer uses the following command (broken across multiple lines -for readability) to install Chef. This command can be customized if you want -to install Chef in another way. +By default, Packer uses the following command (broken across multiple lines for +readability) to install Chef. This command can be customized if you want to +install Chef in another way. -```text +``` {.text} curl -L https://www.opscode.com/chef/install.sh | \ {{if .Sudo}}sudo{{end}} bash ``` @@ -170,9 +175,8 @@ This command can be customized using the `install_command` configuration. ## Folder Permissions -!> The `chef-client` provisioner will chmod the directory with your Chef -keys to 777. This is to ensure that Packer can upload and make use of that -directory. However, once the machine is created, you usually don't -want to keep these directories with those permissions. To change the -permissions on the directories, append a shell provisioner after Chef -to modify them. +!> The `chef-client` provisioner will chmod the directory with your Chef keys +to 777. This is to ensure that Packer can upload and make use of that directory. +However, once the machine is created, you usually don't want to keep these +directories with those permissions. To change the permissions on the +directories, append a shell provisioner after Chef to modify them. 
diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown index 3a76c5514..03b55c066 100644 --- a/website/source/docs/provisioners/chef-solo.html.markdown +++ b/website/source/docs/provisioners/chef-solo.html.markdown @@ -1,28 +1,30 @@ --- -layout: "docs" -page_title: "Chef-Solo Provisioner" -description: |- - The Chef solo Packer provisioner installs and configures software on machines built by Packer using chef-solo. Cookbooks can be uploaded from your local machine to the remote machine or remote paths can be used. ---- +description: | + The Chef solo Packer provisioner installs and configures software on machines + built by Packer using chef-solo. Cookbooks can be uploaded from your local + machine to the remote machine or remote paths can be used. +layout: docs +page_title: 'Chef-Solo Provisioner' +... # Chef Solo Provisioner Type: `chef-solo` -The Chef solo Packer provisioner installs and configures software on machines built -by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). Cookbooks -can be uploaded from your local machine to the remote machine or remote paths -can be used. +The Chef solo Packer provisioner installs and configures software on machines +built by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). +Cookbooks can be uploaded from your local machine to the remote machine or +remote paths can be used. The provisioner will even install Chef onto your machine if it isn't already installed, using the official Chef installers provided by Chef Inc. ## Basic Example -The example below is fully functional and expects cookbooks in the -"cookbooks" directory relative to your working directory. +The example below is fully functional and expects cookbooks in the "cookbooks" +directory relative to your working directory. 
-```javascript +``` {.javascript} { "type": "chef-solo", "cookbook_paths": ["cookbooks"] @@ -34,124 +36,126 @@ The example below is fully functional and expects cookbooks in the The reference of available configuration options is listed below. No configuration is actually required, but at least `run_list` is recommended. -* `chef_environment` (string) - The name of the `chef_environment` sent to the +- `chef_environment` (string) - The name of the `chef_environment` sent to the Chef server. By default this is empty and will not use an environment -* `config_template` (string) - Path to a template that will be used for - the Chef configuration file. By default Packer only sets configuration - it needs to match the settings set in the provisioner configuration. If - you need to set configurations that the Packer provisioner doesn't support, - then you should use a custom configuration template. See the dedicated - "Chef Configuration" section below for more details. +- `config_template` (string) - Path to a template that will be used for the Chef + configuration file. By default Packer only sets configuration it needs to + match the settings set in the provisioner configuration. If you need to set + configurations that the Packer provisioner doesn't support, then you should + use a custom configuration template. See the dedicated "Chef Configuration" + section below for more details. -* `cookbook_paths` (array of strings) - This is an array of paths to - "cookbooks" directories on your local filesystem. These will be uploaded - to the remote machine in the directory specified by the `staging_directory`. - By default, this is empty. +- `cookbook_paths` (array of strings) - This is an array of paths to "cookbooks" + directories on your local filesystem. These will be uploaded to the remote + machine in the directory specified by the `staging_directory`. By default, + this is empty. 
-* `data_bags_path` (string) - The path to the "data\_bags" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `data_bags_path` (string) - The path to the "data\_bags" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `encrypted_data_bag_secret_path` (string) - The path to the file containing - the secret for encrypted data bags. By default, this is empty, so no - secret will be available. +- `encrypted_data_bag_secret_path` (string) - The path to the file containing + the secret for encrypted data bags. By default, this is empty, so no secret + will be available. -* `environments_path` (string) - The path to the "environments" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `environments_path` (string) - The path to the "environments" directory on + your local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `execute_command` (string) - The command used to execute Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `install_command` (string) - The command used to install Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `install_command` (string) - The command used to install Chef. 
This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `json` (object) - An arbitrary mapping of JSON that will be available as - node attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as node + attributes while running Chef. -* `prevent_sudo` (boolean) - By default, the configured commands that are +- `prevent_sudo` (boolean) - By default, the configured commands that are executed to install and run Chef are executed with `sudo`. If this is true, then the sudo will be omitted. -* `remote_cookbook_paths` (array of strings) - A list of paths on the remote +- `remote_cookbook_paths` (array of strings) - A list of paths on the remote machine where cookbooks will already exist. These may exist from a previous provisioner or step. If specified, Chef will be configured to look for cookbooks here. By default, this is empty. -* `roles_path` (string) - The path to the "roles" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `roles_path` (string) - The path to the "roles" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `run_list` (array of strings) - The [run list](https://docs.chef.io/run_lists.html) - for Chef. By default this is empty. +- `run_list` (array of strings) - The [run + list](https://docs.chef.io/run_lists.html) for Chef. By default this is empty. -* `skip_install` (boolean) - If true, Chef will not automatically be installed +- `skip_install` (boolean) - If true, Chef will not automatically be installed on the machine using the Chef omnibus installers. -* `staging_directory` (string) - This is the directory where all the configuration - of Chef by Packer will be placed. 
By default this is "/tmp/packer-chef-solo". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-solo". This directory doesn't need to exist but must have + proper permissions so that the SSH user that Packer uses is able to create + directories and write into this folder. If the permissions are not correct, + use a shell provisioner prior to this to configure it properly. ## Chef Configuration -By default, Packer uses a simple Chef configuration file in order to set -the options specified for the provisioner. But Chef is a complex tool that -supports many configuration options. Packer allows you to specify a custom -configuration template if you'd like to set custom configurations. +By default, Packer uses a simple Chef configuration file in order to set the +options specified for the provisioner. But Chef is a complex tool that supports +many configuration options. Packer allows you to specify a custom configuration +template if you'd like to set custom configurations. The default value for the configuration template is: -```liquid +``` {.liquid} cookbook_path [{{.CookbookPaths}}] ``` -This template is a [configuration template](/docs/templates/configuration-templates.html) -and has a set of variables available to use: +This template is a [configuration +template](/docs/templates/configuration-templates.html) and has a set of +variables available to use: -* `ChefEnvironment` - The current enabled environment. Only non-empty - if the environment path is set. -* `CookbookPaths` is the set of cookbook paths ready to embedded directly - into a Ruby array to configure Chef. 
-* `DataBagsPath` is the path to the data bags folder.
-* `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret
-* `EnvironmentsPath` - The path to the environments folder.
-* `RolesPath` - The path to the roles folder.
+- `ChefEnvironment` - The current enabled environment. Only non-empty if the
+  environment path is set.
+- `CookbookPaths` is the set of cookbook paths ready to be embedded directly
+  into a Ruby array to configure Chef.
+- `DataBagsPath` is the path to the data bags folder.
+- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret
+- `EnvironmentsPath` - The path to the environments folder.
+- `RolesPath` - The path to the roles folder.
 
 ## Execute Command
 
-By default, Packer uses the following command (broken across multiple lines
-for readability) to execute Chef:
+By default, Packer uses the following command (broken across multiple lines for
+readability) to execute Chef:
 
-```liquid
+``` {.liquid}
 {{if .Sudo}}sudo {{end}}chef-solo \
     --no-color \
     -c {{.ConfigPath}} \
     -j {{.JsonPath}}
 ```
 
-This command can be customized using the `execute_command` configuration.
-As you can see from the default value above, the value of this configuration
-can contain various template variables, defined below:
+This command can be customized using the `execute_command` configuration. As you
+can see from the default value above, the value of this configuration can
+contain various template variables, defined below:
 
-* `ConfigPath` - The path to the Chef configuration file.
-  file.
-* `JsonPath` - The path to the JSON attributes file for the node.
-* `Sudo` - A boolean of whether to `sudo` the command or not, depending on
-  the value of the `prevent_sudo` configuration.
+- `ConfigPath` - The path to the Chef configuration file.
+- `JsonPath` - The path to the JSON attributes file for the node.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.
## Install Command -By default, Packer uses the following command (broken across multiple lines -for readability) to install Chef. This command can be customized if you want -to install Chef in another way. +By default, Packer uses the following command (broken across multiple lines for +readability) to install Chef. This command can be customized if you want to +install Chef in another way. -```text +``` {.text} curl -L https://www.chef.io/chef/install.sh | \ {{if .Sudo}}sudo{{end}} bash ``` diff --git a/website/source/docs/provisioners/custom.html.markdown b/website/source/docs/provisioners/custom.html.markdown index 08df184fd..673ff3441 100644 --- a/website/source/docs/provisioners/custom.html.markdown +++ b/website/source/docs/provisioners/custom.html.markdown @@ -1,13 +1,16 @@ --- -layout: "docs" -page_title: "Custom Provisioner" -description: |- - Packer is extensible, allowing you to write new provisioners without having to modify the core source code of Packer itself. Documentation for creating new provisioners is covered in the custom provisioners page of the Packer plugin section. ---- +description: | + Packer is extensible, allowing you to write new provisioners without having to + modify the core source code of Packer itself. Documentation for creating new + provisioners is covered in the custom provisioners page of the Packer plugin + section. +layout: docs +page_title: Custom Provisioner +... # Custom Provisioner Packer is extensible, allowing you to write new provisioners without having to -modify the core source code of Packer itself. Documentation for creating -new provisioners is covered in the [custom provisioners](/docs/extend/provisioner.html) -page of the Packer plugin section. +modify the core source code of Packer itself. Documentation for creating new +provisioners is covered in the [custom +provisioners](/docs/extend/provisioner.html) page of the Packer plugin section. 
diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 19fcce9be..3439b4dd6 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -1,24 +1,26 @@ --- -layout: "docs" -page_title: "File Provisioner" -description: |- - The file Packer provisioner uploads files to machines built by Packer. The recommended usage of the file provisioner is to use it to upload files, and then use shell provisioner to move them to the proper place, set permissions, etc. ---- +description: | + The file Packer provisioner uploads files to machines built by Packer. The + recommended usage of the file provisioner is to use it to upload files, and then + use shell provisioner to move them to the proper place, set permissions, etc. +layout: docs +page_title: File Provisioner +... # File Provisioner Type: `file` The file Packer provisioner uploads files to machines built by Packer. The -recommended usage of the file provisioner is to use it to upload files, -and then use [shell provisioner](/docs/provisioners/shell.html) to move -them to the proper place, set permissions, etc. +recommended usage of the file provisioner is to use it to upload files, and then +use [shell provisioner](/docs/provisioners/shell.html) to move them to the +proper place, set permissions, etc. The file provisioner can upload both single files and complete directories. ## Basic Example -```javascript +``` {.javascript} { "type": "file", "source": "app.tar.gz", @@ -30,42 +32,42 @@ The file provisioner can upload both single files and complete directories. The available configuration options are listed below. All elements are required. -* `source` (string) - The path to a local file or directory to upload to the - machine. The path can be absolute or relative. If it is relative, it is +- `source` (string) - The path to a local file or directory to upload to + the machine. 
The path can be absolute or relative. If it is relative, it is relative to the working directory when Packer is executed. If this is a directory, the existence of a trailing slash is important. Read below on uploading directories. -* `destination` (string) - The path where the file will be uploaded to in the - machine. This value must be a writable location and any parent directories +- `destination` (string) - The path where the file will be uploaded to in + the machine. This value must be a writable location and any parent directories must already exist. -* `direction` (string) - The direction of the file transfer. This defaults - to "upload." If it is set to "download" then the file "source" in - the machine wll be downloaded locally to "destination" +- `direction` (string) - The direction of the file transfer. This defaults to + "upload." If it is set to "download" then the file "source" in the machine wll + be downloaded locally to "destination" ## Directory Uploads -The file provisioner is also able to upload a complete directory to the -remote machine. When uploading a directory, there are a few important things -you should know. +The file provisioner is also able to upload a complete directory to the remote +machine. When uploading a directory, there are a few important things you should +know. -First, the destination directory must already exist. If you need to -create it, use a shell provisioner just prior to the file provisioner -in order to create the directory. +First, the destination directory must already exist. If you need to create it, +use a shell provisioner just prior to the file provisioner in order to create +the directory. Next, the existence of a trailing slash on the source path will determine -whether the directory name will be embedded within the destination, or -whether the destination will be created. 
An example explains this best: +whether the directory name will be embedded within the destination, or whether +the destination will be created. An example explains this best: -If the source is `/foo` (no trailing slash), and the destination is -`/tmp`, then the contents of `/foo` on the local machine will be uploaded -to `/tmp/foo` on the remote machine. The `foo` directory on the remote -machine will be created by Packer. +If the source is `/foo` (no trailing slash), and the destination is `/tmp`, then +the contents of `/foo` on the local machine will be uploaded to `/tmp/foo` on +the remote machine. The `foo` directory on the remote machine will be created by +Packer. -If the source, however, is `/foo/` (a trailing slash is present), and -the destination is `/tmp`, then the contents of `/foo` will be uploaded -into `/tmp` directly. +If the source, however, is `/foo/` (a trailing slash is present), and the +destination is `/tmp`, then the contents of `/foo` will be uploaded into `/tmp` +directly. -This behavior was adopted from the standard behavior of rsync. Note that -under the covers, rsync may or may not be used. +This behavior was adopted from the standard behavior of rsync. Note that under +the covers, rsync may or may not be used. diff --git a/website/source/docs/provisioners/powershell.html.markdown b/website/source/docs/provisioners/powershell.html.markdown index 69cb90b9a..ebc56ec4c 100644 --- a/website/source/docs/provisioners/powershell.html.markdown +++ b/website/source/docs/provisioners/powershell.html.markdown @@ -1,9 +1,11 @@ --- -layout: "docs" -page_title: "PowerShell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. ---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. 
Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: PowerShell Provisioner +... # PowerShell Provisioner @@ -16,7 +18,7 @@ It assumes that the communicator in use is WinRM. The example below is fully functional. -```javascript +``` {.javascript} { "type": "powershell", "inline": ["dir c:\\"] @@ -28,55 +30,54 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. -Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next and + so on. Inline scripts are the easiest way to pull off simple tasks within + the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in the machine. + This path can be absolute or relative. If it is relative, it is relative to + the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. 
The scripts +- `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. Each script is executed in isolation, so state such as variables from one script won't carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to inject + prior to the execute\_command. The format should be `key=value`. Packer + injects some environmental variables by default into the environment, as well, + which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. - The value of this is treated as [configuration template](/docs/templates/configuration-templates.html). - There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. + The value of this is treated as [configuration + template](/docs/templates/configuration-templates.html). 
There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -* `elevated_user` and `elevated_password` (string) - If specified, - the PowerShell script will be run with elevated privileges using - the given Windows user. +- `elevated_user` and `elevated_password` (string) - If specified, the + PowerShell script will be run with elevated privileges using the given + Windows user. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a writable + location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* the + remote process. By default this is "5m" or 5 minutes. This setting exists in + order to deal with times when SSH may restart, such as a system reboot. Set + this to a higher value if reboots take a longer amount of time. -* `valid_exit_codes` (list of ints) - Valid exit codes for the script. - By default this is just 0. +- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By + default this is just 0. 
diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 8fd05e4f2..ac5f4f628 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -1,33 +1,38 @@ --- -layout: "docs" -page_title: "Puppet (Masterless) Provisioner" -description: |- - The masterless Puppet Packer provisioner configures Puppet to run on the machines by Packer from local modules and manifest files. Modules and manifests can be uploaded from your local machine to the remote machine or can simply use remote paths (perhaps obtained using something like the shell provisioner). Puppet is run in masterless mode, meaning it never communicates to a Puppet master. ---- +description: | + The masterless Puppet Packer provisioner configures Puppet to run on the + machines by Packer from local modules and manifest files. Modules and manifests + can be uploaded from your local machine to the remote machine or can simply use + remote paths (perhaps obtained using something like the shell provisioner). + Puppet is run in masterless mode, meaning it never communicates to a Puppet + master. +layout: docs +page_title: 'Puppet (Masterless) Provisioner' +... # Puppet (Masterless) Provisioner Type: `puppet-masterless` -The masterless Puppet Packer provisioner configures Puppet to run on the machines -by Packer from local modules and manifest files. Modules and manifests -can be uploaded from your local machine to the remote machine or can simply -use remote paths (perhaps obtained using something like the shell provisioner). +The masterless Puppet Packer provisioner configures Puppet to run on the +machines by Packer from local modules and manifest files. Modules and manifests +can be uploaded from your local machine to the remote machine or can simply use +remote paths (perhaps obtained using something like the shell provisioner). 
Puppet is run in masterless mode, meaning it never communicates to a Puppet master. --> **Note:** Puppet will _not_ be installed automatically -by this provisioner. This provisioner expects that Puppet is already -installed on the machine. It is common practice to use the -[shell provisioner](/docs/provisioners/shell.html) before the -Puppet provisioner to do this. +-> **Note:** Puppet will *not* be installed automatically by this +provisioner. This provisioner expects that Puppet is already installed on the +machine. It is common practice to use the [shell +provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do +this. ## Basic Example -The example below is fully functional and expects the configured manifest -file to exist relative to your working directory: +The example below is fully functional and expects the configured manifest file +to exist relative to your working directory: -```javascript +``` {.javascript} { "type": "puppet-masterless", "manifest_file": "site.pp" @@ -40,63 +45,66 @@ The reference of available configuration options is listed below. Required parameters: -* `manifest_file` (string) - This is either a path to a puppet manifest (`.pp` - file) _or_ a directory containing multiple manifests that puppet will apply - (the ["main manifest"][1]). These file(s) must exist on your local system and - will be uploaded to the remote machine. - - [1]: https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html +- `manifest_file` (string) - This is either a path to a puppet manifest + (`.pp` file) *or* a directory containing multiple manifests that puppet will + apply (the ["main + manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)). + These file(s) must exist on your local system and will be uploaded to the + remote machine. Optional parameters: -* `execute_command` (string) - The command used to execute Puppet. 
This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `execute_command` (string) - The command used to execute Puppet. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `facter` (object of key/value strings) - Additional +- `facter` (object of key/value strings) - Additional [facts](http://puppetlabs.com/puppet/related-projects/facter) to make available when Puppet is running. -* `hiera_config_path` (string) - The path to a local file with hiera +- `hiera_config_path` (string) - The path to a local file with hiera configuration to be uploaded to the remote machine. Hiera data directories must be uploaded using the file provisioner separately. -* `manifest_dir` (string) - The path to a local directory with manifests - to be uploaded to the remote machine. This is useful if your main - manifest file uses imports. This directory doesn't necessarily contain - the `manifest_file`. It is a separate directory that will be set as - the "manifestdir" setting on Puppet. +- `manifest_dir` (string) - The path to a local directory with manifests to be + uploaded to the remote machine. This is useful if your main manifest file + uses imports. This directory doesn't necessarily contain the `manifest_file`. + It is a separate directory that will be set as the "manifestdir" setting + on Puppet. - ~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. - This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you - have multiple manifests you should use `manifest_file` instead. + \~> `manifest_dir` is passed to `puppet apply` as the + `--manifestdir` option. This option was deprecated in puppet 3.6, and removed + in puppet 4.0. If you have multiple manifests you should use + `manifest_file` instead. 
-* `module_paths` (array of strings) - This is an array of paths to module - directories on your local filesystem. These will be uploaded to the remote - machine. By default, this is empty. +- `module_paths` (array of strings) - This is an array of paths to module + directories on your local filesystem. These will be uploaded to the + remote machine. By default, this is empty. -* `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -* `staging_directory` (string) - This is the directory where all the configuration - of Puppet by Packer will be placed. By default this is "/tmp/packer-puppet-masterless". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but + must have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -* `working_directory` (string) - This is the directory from which the puppet command - will be run. When using hiera with a relative path, this option allows to ensure - that the paths are working properly. If not specified, defaults to the value of - specified `staging_directory` (or its default value if not specified either). 
+- `working_directory` (string) - This is the directory from which the puppet + command will be run. When using hiera with a relative path, this option allows + to ensure that the paths are working properly. If not specified, defaults to + the value of specified `staging_directory` (or its default value if not + specified either). ## Execute Command -By default, Packer uses the following command (broken across multiple lines -for readability) to execute Puppet: +By default, Packer uses the following command (broken across multiple lines for +readability) to execute Puppet: -```liquid +``` {.liquid} cd {{.WorkingDir}} && \ {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \ --verbose \ @@ -107,19 +115,19 @@ cd {{.WorkingDir}} && \ {{.ManifestFile}} ``` -This command can be customized using the `execute_command` configuration. -As you can see from the default value above, the value of this configuration -can contain various template variables, defined below: +This command can be customized using the `execute_command` configuration. As you +can see from the default value above, the value of this configuration can +contain various template variables, defined below: -* `WorkingDir` - The path from which Puppet will be executed. -* `FacterVars` - Shell-friendly string of environmental variables used - to set custom facts configured for this provisioner. -* `HieraConfigPath` - The path to a hiera configuration file. -* `ManifestFile` - The path on the remote machine to the manifest file - for Puppet to use. -* `ModulePath` - The paths to the module directories. -* `Sudo` - A boolean of whether to `sudo` the command or not, depending on - the value of the `prevent_sudo` configuration. +- `WorkingDir` - The path from which Puppet will be executed. +- `FacterVars` - Shell-friendly string of environmental variables used to set + custom facts configured for this provisioner. +- `HieraConfigPath` - The path to a hiera configuration file. 
+- `ManifestFile` - The path on the remote machine to the manifest file for + Puppet to use. +- `ModulePath` - The paths to the module directories. +- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the + value of the `prevent_sudo` configuration. ## Default Facts @@ -127,10 +135,10 @@ In addition to being able to specify custom Facter facts using the `facter` configuration, the provisioner automatically defines certain commonly useful facts: -* `packer_build_name` is set to the name of the build that Packer is running. +- `packer_build_name` is set to the name of the build that Packer is running. This is most useful when Packer is making multiple builds and you want to distinguish them in your Hiera hierarchy. -* `packer_builder_type` is the type of the builder that was used to create the +- `packer_builder_type` is the type of the builder that was used to create the machine that Puppet is running on. This is useful if you want to run only certain parts of your Puppet code on systems built with certain builders. diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown index 803ae22cf..32bcadbe8 100644 --- a/website/source/docs/provisioners/puppet-server.html.markdown +++ b/website/source/docs/provisioners/puppet-server.html.markdown @@ -1,29 +1,30 @@ --- -layout: "docs" -page_title: "Puppet Server Provisioner" -description: |- - The `puppet-server` Packer provisioner provisions Packer machines with Puppet by connecting to a Puppet master. ---- +description: | + The `puppet-server` Packer provisioner provisions Packer machines with Puppet by + connecting to a Puppet master. +layout: docs +page_title: Puppet Server Provisioner +... # Puppet Server Provisioner Type: `puppet-server` -The `puppet-server` Packer provisioner provisions Packer machines with Puppet -by connecting to a Puppet master. 
+The `puppet-server` Packer provisioner provisions Packer machines with Puppet by +connecting to a Puppet master. --> **Note:** Puppet will _not_ be installed automatically -by this provisioner. This provisioner expects that Puppet is already -installed on the machine. It is common practice to use the -[shell provisioner](/docs/provisioners/shell.html) before the -Puppet provisioner to do this. +-> **Note:** Puppet will *not* be installed automatically by this +provisioner. This provisioner expects that Puppet is already installed on the +machine. It is common practice to use the [shell +provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do +this. ## Basic Example -The example below is fully functional and expects a Puppet server to be accessible -from your network.: +The example below is fully functional and expects a Puppet server to be +accessible from your network.: -```javascript +``` {.javascript} { "type": "puppet-server", "options": "--test --pluginsync", @@ -37,39 +38,39 @@ from your network.: The reference of available configuration options is listed below. -The provisioner takes various options. None are strictly -required. They are listed below: +The provisioner takes various options. None are strictly required. They are +listed below: -* `client_cert_path` (string) - Path to the client certificate for the - node on your disk. This defaults to nothing, in which case a client - cert won't be uploaded. +- `client_cert_path` (string) - Path to the client certificate for the node on + your disk. This defaults to nothing, in which case a client cert won't + be uploaded. -* `client_private_key_path` (string) - Path to the client private key for - the node on your disk. This defaults to nothing, in which case a client - private key won't be uploaded. +- `client_private_key_path` (string) - Path to the client private key for the + node on your disk. This defaults to nothing, in which case a client private + key won't be uploaded. 
-* `facter` (object of key/value strings) - Additional Facter facts to make available to the - Puppet run. +- `facter` (object of key/value strings) - Additional Facter facts to make + available to the Puppet run. -* `ignore_exit_codes` (boolean) - If true, Packer will never consider the +- `ignore_exit_codes` (boolean) - If true, Packer will never consider the provisioner a failure. -* `options` (string) - Additional command line options to pass - to `puppet agent` when Puppet is ran. +- `options` (string) - Additional command line options to pass to `puppet agent` + when Puppet is ran. -* `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -* `puppet_node` (string) - The name of the node. If this isn't set, - the fully qualified domain name will be used. +- `puppet_node` (string) - The name of the node. If this isn't set, the fully + qualified domain name will be used. -* `puppet_server` (string) - Hostname of the Puppet server. By default - "puppet" will be used. +- `puppet_server` (string) - Hostname of the Puppet server. By default "puppet" + will be used. -* `staging_directory` (string) - This is the directory where all the configuration - of Puppet by Packer will be placed. By default this is "/tmp/packer-puppet-server". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-server". 
This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index a298bb28d..cc1ab1f7b 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -1,22 +1,23 @@ --- -layout: "docs" -page_title: "Salt (Masterless) Provisioner" -description: |- - The `salt-masterless` Packer provisioner provisions machines built by Packer using Salt states, without connecting to a Salt master. ---- +description: | + The `salt-masterless` Packer provisioner provisions machines built by Packer + using Salt states, without connecting to a Salt master. +layout: docs +page_title: 'Salt (Masterless) Provisioner' +... # Salt Masterless Provisioner Type: `salt-masterless` -The `salt-masterless` Packer provisioner provisions machines built by Packer using -[Salt](http://saltstack.com/) states, without connecting to a Salt master. +The `salt-masterless` Packer provisioner provisions machines built by Packer +using [Salt](http://saltstack.com/) states, without connecting to a Salt master. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "salt-masterless", "local_state_tree": "/Users/me/salt" @@ -25,31 +26,33 @@ The example below is fully functional. ## Configuration Reference -The reference of available configuration options is listed below. The only required argument is the path to your local salt state tree. +The reference of available configuration options is listed below. The only +required argument is the path to your local salt state tree. 
Optional: -* `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage - is somewhat documented on [github](https://github.com/saltstack/salt-bootstrap), - but the [script itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) +- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage + is somewhat documented on + [github](https://github.com/saltstack/salt-bootstrap), but the [script + itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) has more detailed usage instructions. By default, no arguments are sent to the script. -* `local_pillar_roots` (string) - The path to your local - [pillar roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). +- `local_pillar_roots` (string) - The path to your local [pillar + roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). This will be uploaded to the `/srv/pillar` on the remote. -* `local_state_tree` (string) - The path to your local - [state tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). +- `local_state_tree` (string) - The path to your local [state + tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). This will be uploaded to the `/srv/salt` on the remote. -* `minion_config` (string) - The path to your local - [minion config](http://docs.saltstack.com/topics/configuration.html). - This will be uploaded to the `/etc/salt` on the remote. +- `minion_config` (string) - The path to your local [minion + config](http://docs.saltstack.com/topics/configuration.html). This will be + uploaded to the `/etc/salt` on the remote. -* `skip_bootstrap` (boolean) - By default the salt provisioner runs - [salt bootstrap](https://github.com/saltstack/salt-bootstrap) to install - salt. Set this to true to skip this step. 
+- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt + bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set + this to true to skip this step. -* `temp_config_dir` (string) - Where your local state tree will be copied - before moving to the `/srv/salt` directory. Default is `/tmp/salt`. +- `temp_config_dir` (string) - Where your local state tree will be copied before + moving to the `/srv/salt` directory. Default is `/tmp/salt`. diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index dec270841..97015a847 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -1,27 +1,29 @@ --- -layout: "docs" -page_title: "Shell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. ---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: Shell Provisioner +... # Shell Provisioner Type: `shell` -The shell Packer provisioner provisions machines built by Packer using shell scripts. -Shell provisioning is the easiest way to get software installed and configured -on a machine. +The shell Packer provisioner provisions machines built by Packer using shell +scripts. Shell provisioning is the easiest way to get software installed and +configured on a machine. --> **Building Windows images?** You probably want to use the -[PowerShell](/docs/provisioners/powershell.html) or -[Windows Shell](/docs/provisioners/windows-shell.html) provisioners. 
+-> **Building Windows images?** You probably want to use the +[PowerShell](/docs/provisioners/powershell.html) or [Windows +Shell](/docs/provisioners/windows-shell.html) provisioners. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "shell", "inline": ["echo foo"] @@ -33,83 +35,82 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. -Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next and + so on. Inline scripts are the easiest way to pull off simple tasks within + the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in the machine. + This path can be absolute or relative. If it is relative, it is relative to + the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. The scripts +- `scripts` (array of strings) - An array of scripts to execute. 
The scripts will be uploaded and executed in the order specified. Each script is executed in isolation, so state such as variables from one script won't carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to inject + prior to the execute\_command. The format should be `key=value`. Packer + injects some environmental variables by default into the environment, as well, + which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of + this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. 
-* `inline_shebang` (string) - The +- `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when - running commands specified by `inline`. By default, this is `/bin/sh -e`. - If you're not using `inline`, then this configuration has no effect. - **Important:** If you customize this, be sure to include something like - the `-e` flag, otherwise individual steps failing won't fail the provisioner. + running commands specified by `inline`. By default, this is `/bin/sh -e`. If + you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like the + `-e` flag, otherwise individual steps failing won't fail the provisioner. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a writable + location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* the + remote process. By default this is "5m" or 5 minutes. This setting exists in + order to deal with times when SSH may restart, such as a system reboot. Set + this to a higher value if reboots take a longer amount of time. ## Execute Command Example -To many new users, the `execute_command` is puzzling. However, it provides -an important function: customization of how the command is executed. 
The -most common use case for this is dealing with **sudo password prompts**. You may -also need to customize this if you use a non-POSIX shell, such as `tcsh` on -FreeBSD. +To many new users, the `execute_command` is puzzling. However, it provides an +important function: customization of how the command is executed. The most +common use case for this is dealing with **sudo password prompts**. You may also +need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD. ### Sudo Example -Some operating systems default to a non-root user. For example if you login -as `ubuntu` and can sudo using the password `packer`, then you'll want to -change `execute_command` to be: +Some operating systems default to a non-root user. For example if you login as +`ubuntu` and can sudo using the password `packer`, then you'll want to change +`execute_command` to be: -```text +``` {.text} "echo 'packer' | {{ .Vars }} sudo -E -S sh '{{ .Path }}'" ``` -The `-S` flag tells `sudo` to read the password from stdin, which in this -case is being piped in with the value of `packer`. The `-E` flag tells `sudo` -to preserve the environment, allowing our environmental variables to work -within the script. +The `-S` flag tells `sudo` to read the password from stdin, which in this case +is being piped in with the value of `packer`. The `-E` flag tells `sudo` to +preserve the environment, allowing our environmental variables to work within +the script. -By setting the `execute_command` to this, your script(s) can run with -root privileges without worrying about password prompts. +By setting the `execute_command` to this, your script(s) can run with root +privileges without worrying about password prompts. ### FreeBSD Example @@ -123,44 +124,44 @@ Note the addition of `env` before `{{ .Vars }}`. 
## Default Environmental Variables -In addition to being able to specify custom environmental variables using -the `environment_vars` configuration, the provisioner automatically -defines certain commonly useful environmental variables: +In addition to being able to specify custom environmental variables using the +`environment_vars` configuration, the provisioner automatically defines certain +commonly useful environmental variables: -* `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. +- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. This is most useful when Packer is making multiple builds and you want to distinguish them slightly from a common provisioning script. -* `PACKER_BUILDER_TYPE` is the type of the builder that was used to create - the machine that the script is running on. This is useful if you want to - run only certain parts of the script on systems built with certain builders. +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the + machine that the script is running on. This is useful if you want to run only + certain parts of the script on systems built with certain builders. ## Handling Reboots Provisioning sometimes involves restarts, usually when updating the operating system. Packer is able to tolerate restarts via the shell provisioner. -Packer handles this by retrying to start scripts for a period of time -before failing. This allows time for the machine to start up and be ready -to run scripts. The amount of time the provisioner will wait is configured -using `start_retry_timeout`, which defaults to a few minutes. +Packer handles this by retrying to start scripts for a period of time before +failing. This allows time for the machine to start up and be ready to run +scripts. The amount of time the provisioner will wait is configured using +`start_retry_timeout`, which defaults to a few minutes. 
-Sometimes, when executing a command like `reboot`, the shell script will -return and Packer will start executing the next one before SSH actually -quits and the machine restarts. For this, put a long `sleep` after the -reboot so that SSH will eventually be killed automatically: +Sometimes, when executing a command like `reboot`, the shell script will return +and Packer will start executing the next one before SSH actually quits and the +machine restarts. For this, put a long `sleep` after the reboot so that SSH will +eventually be killed automatically: -```text +``` {.text} reboot sleep 60 ``` -Some OS configurations don't properly kill all network connections on -reboot, causing the provisioner to hang despite a reboot occurring. -In this case, make sure you shut down the network interfaces -on reboot or in your shell script. For example, on Gentoo: +Some OS configurations don't properly kill all network connections on reboot, +causing the provisioner to hang despite a reboot occurring. In this case, make +sure you shut down the network interfaces on reboot or in your shell script. For +example, on Gentoo: -```text +``` {.text} /etc/init.d/net.eth0 stop ``` @@ -170,59 +171,58 @@ Some provisioning requires connecting to remote SSH servers from within the packer instance. The below example is for pulling code from a private git repository utilizing openssh on the client. Make sure you are running `ssh-agent` and add your git repo ssh keys into it using `ssh-add /path/to/key`. -When the packer instance needs access to the ssh keys the agent will forward -the request back to your `ssh-agent`. +When the packer instance needs access to the ssh keys the agent will forward the +request back to your `ssh-agent`. -Note: when provisioning via git you should add the git server keys into -the `~/.ssh/known_hosts` file otherwise the git command could hang awaiting -input. 
This can be done by copying the file in via the -[file provisioner](/docs/provisioners/file.html) (more secure) -or using `ssh-keyscan` to populate the file (less secure). An example of the -latter accessing github would be: +Note: when provisioning via git you should add the git server keys into the +`~/.ssh/known_hosts` file otherwise the git command could hang awaiting input. +This can be done by copying the file in via the [file +provisioner](/docs/provisioners/file.html) (more secure) or using `ssh-keyscan` +to populate the file (less secure). An example of the latter accessing github +would be: -``` -{ - "type": "shell", - "inline": [ - "sudo apt-get install -y git", - "ssh-keyscan github.com >> ~/.ssh/known_hosts", - "git clone git@github.com:exampleorg/myprivaterepo.git" - ] -} -``` + { + "type": "shell", + "inline": [ + "sudo apt-get install -y git", + "ssh-keyscan github.com >> ~/.ssh/known_hosts", + "git clone git@github.com:exampleorg/myprivaterepo.git" + ] + } ## Troubleshooting *My shell script doesn't work correctly on Ubuntu* -* On Ubuntu, the `/bin/sh` shell is -[dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has -[bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in it, -then put `#!/bin/bash` at the top of your script. Differences -between dash and bash can be found on the [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. +- On Ubuntu, the `/bin/sh` shell is + [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has + [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in + it, then put `#!/bin/bash` at the top of your script. Differences between dash + and bash can be found on the + [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. *My shell works when I login but fails with the shell provisioner* -* See the above tip. More than likely, your login shell is using `/bin/bash` -while the provisioner is using `/bin/sh`. 
+- See the above tip. More than likely, your login shell is using `/bin/bash`
+  while the provisioner is using `/bin/sh`.
 
 *My installs hang when using `apt-get` or `yum`*
 
-* Make sure you add a `-y` to the command to prevent it from requiring
-user input before proceeding.
+- Make sure you add a `-y` to the command to prevent it from requiring user
+  input before proceeding.
 
 *How do I tell what my shell script is doing?*
 
-* Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`)
-will echo the script statements as it is executing.
+- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`)
+  will echo the script statements as it is executing.
 
 *My builds don't always work the same*
 
-* Some distributions start the SSH daemon before other core services which
-can create race conditions. Your first provisioner can tell the machine to
-wait until it completely boots.
+- Some distributions start the SSH daemon before other core services which can
+  create race conditions. Your first provisioner can tell the machine to wait
+  until it completely boots.
 
-```javascript
+``` {.javascript}
 {
   "type": "shell",
   "inline": [ "sleep 10" ]
diff --git a/website/source/docs/templates/builders.html.markdown b/website/source/docs/templates/builders.html.markdown
index 2afb0a95c..594ed59e4 100644
--- a/website/source/docs/templates/builders.html.markdown
+++ b/website/source/docs/templates/builders.html.markdown
@@ -1,27 +1,28 @@
 ---
-layout: "docs"
-page_title: "Templates: Builders"
-description: |-
-  Within the template, the builders section contains an array of all the builders that Packer should use to generate a machine images for the template.
----
+description: |
+  Within the template, the builders section contains an array of all the builders
+  that Packer should use to generate machine images for the template.
+layout: docs
+page_title: 'Templates: Builders'
+...
# Templates: Builders
 
-Within the template, the builders section contains an array of all the
-builders that Packer should use to generate a machine images for the template.
+Within the template, the builders section contains an array of all the builders
+that Packer should use to generate machine images for the template.
 
-Builders are responsible for creating machines and generating images from
-them for various platforms. For example, there are separate builders for
-EC2, VMware, VirtualBox, etc. Packer comes with many builders by default,
-and can also be extended to add new builders.
+Builders are responsible for creating machines and generating images from them
+for various platforms. For example, there are separate builders for EC2, VMware,
+VirtualBox, etc. Packer comes with many builders by default, and can also be
+extended to add new builders.
 
-This documentation page will cover how to configure a builder in a template.
-The specific configuration options available for each builder, however,
-must be referenced from the documentation for that specific builder.
+This documentation page will cover how to configure a builder in a template. The
+specific configuration options available for each builder, however, must be
+referenced from the documentation for that specific builder.
 
 Within a template, a section of builder definitions looks like this:
 
-```javascript
+``` {.javascript}
 {
   "builders": [
     // ... one or more builder definitions here
@@ -31,19 +32,19 @@ Within a template, a section of builder definitions looks like this:
 
 ## Builder Definition
 
-A single builder definition maps to exactly one [build](/docs/basics/terminology.html#term-build).
-A builder definition is a JSON object that requires at least a `type` key. The
-`type` is the name of the builder that will be used to create a machine image
-for the build.
+A single builder definition maps to exactly one
+[build](/docs/basics/terminology.html#term-build). 
A builder definition is a +JSON object that requires at least a `type` key. The `type` is the name of the +builder that will be used to create a machine image for the build. -In addition to the `type`, other keys configure the builder itself. For -example, the AWS builder requires an `access_key`, `secret_key`, and -some other settings. These are placed directly within the builder definition. +In addition to the `type`, other keys configure the builder itself. For example, +the AWS builder requires an `access_key`, `secret_key`, and some other settings. +These are placed directly within the builder definition. -An example builder definition is shown below, in this case configuring -the AWS builder: +An example builder definition is shown below, in this case configuring the AWS +builder: -```javascript +``` {.javascript} { "type": "amazon-ebs", "access_key": "...", @@ -53,23 +54,22 @@ the AWS builder: ## Named Builds -Each build in Packer has a name. By default, the name is just the name -of the builder being used. In general, this is good enough. Names only serve -as an indicator in the output of what is happening. If you want, however, -you can specify a custom name using the `name` key within the builder definition. +Each build in Packer has a name. By default, the name is just the name of the +builder being used. In general, this is good enough. Names only serve as an +indicator in the output of what is happening. If you want, however, you can +specify a custom name using the `name` key within the builder definition. -This is particularly useful if you have multiple builds defined that use -the same underlying builder. In this case, you must specify a name for at least -one of them since the names must be unique. +This is particularly useful if you have multiple builds defined that use the +same underlying builder. In this case, you must specify a name for at least one +of them since the names must be unique. 
## Communicators Every build is associated with a single -[communicator](/docs/templates/communicator.html). Communicators are -used to establish a connection for provisioning a remote machine (such -as an AWS instance or local virtual machine). +[communicator](/docs/templates/communicator.html). Communicators are used to +establish a connection for provisioning a remote machine (such as an AWS +instance or local virtual machine). -All the examples for the various builders show some communicator (usually -SSH), but the communicators are highly customizable so we recommend -reading the +All the examples for the various builders show some communicator (usually SSH), +but the communicators are highly customizable so we recommend reading the [communicator documentation](/docs/templates/communicator.html). diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index cef1385d3..9bc8f835e 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -1,40 +1,42 @@ --- -layout: "docs" -page_title: "Configuration Templates" -description: |- - All strings within templates are processed by a common Packer templating engine, where variables and functions can be used to modify the value of a configuration parameter at runtime. ---- +description: | + All strings within templates are processed by a common Packer templating engine, + where variables and functions can be used to modify the value of a configuration + parameter at runtime. +layout: docs +page_title: Configuration Templates +... # Configuration Templates -All strings within templates are processed by a common Packer templating -engine, where variables and functions can be used to modify the value of -a configuration parameter at runtime. 
+All strings within templates are processed by a common Packer templating engine, +where variables and functions can be used to modify the value of a configuration +parameter at runtime. -For example, the `{{timestamp}}` function can be used in any string to -generate the current timestamp. This is useful for configurations that require -unique keys, such as AMI names. By setting the AMI name to something like +For example, the `{{timestamp}}` function can be used in any string to generate +the current timestamp. This is useful for configurations that require unique +keys, such as AMI names. By setting the AMI name to something like `My Packer AMI {{timestamp}}`, the AMI name will be unique down to the second. -In addition to globally available functions like timestamp shown before, -some configurations have special local variables that are available only -for that configuration. These are recognizable because they're prefixed by -a period, such as `{{.Name}}`. +In addition to globally available functions like timestamp shown before, some +configurations have special local variables that are available only for that +configuration. These are recognizable because they're prefixed by a period, such +as `{{.Name}}`. -The complete syntax is covered in the next section, followed by a reference -of globally available functions. +The complete syntax is covered in the next section, followed by a reference of +globally available functions. ## Syntax -The syntax of templates is extremely simple. Anything template related -happens within double-braces: `{{ }}`. Variables are prefixed with a period -and capitalized, such as `{{.Variable}}` and functions are just directly -within the braces, such as `{{timestamp}}`. +The syntax of templates is extremely simple. Anything template related happens +within double-braces: `{{ }}`. Variables are prefixed with a period and +capitalized, such as `{{.Variable}}` and functions are just directly within the +braces, such as `{{timestamp}}`. 
Here is an example from the VMware VMX template that shows configuration templates in action: -```liquid +``` {.liquid} .encoding = "UTF-8" displayName = "{{ .Name }}" guestOS = "{{ .GuestOS }}" @@ -43,7 +45,7 @@ guestOS = "{{ .GuestOS }}" In this case, the "Name" and "GuestOS" variables will be replaced, potentially resulting in a VMX that looks like this: -```liquid +``` {.liquid} .encoding = "UTF-8" displayName = "packer" guestOS = "otherlinux" @@ -52,70 +54,126 @@ guestOS = "otherlinux" ## Global Functions While some configuration settings have local variables specific to only that -configuration, a set of functions are available globally for use in _any string_ +configuration, a set of functions are available globally for use in *any string* in Packer templates. These are listed below for reference. -* `build_name` - The name of the build being run. -* `build_type` - The type of the builder being used currently. -* `isotime [FORMAT]` - UTC time, which can be [formatted](http://golang.org/pkg/time/#example_Time_Format). - See more examples below. -* `lower` - Lowercases the string. -* `pwd` - The working directory while executing Packer. -* `template_dir` - The directory to the template for the build. -* `timestamp` - The current Unix timestamp in UTC. -* `uuid` - Returns a random UUID. -* `upper` - Uppercases the string. +- `build_name` - The name of the build being run. +- `build_type` - The type of the builder being used currently. +- `isotime [FORMAT]` - UTC time, which can be + [formatted](http://golang.org/pkg/time/#example_Time_Format). See more + examples below. +- `lower` - Lowercases the string. +- `pwd` - The working directory while executing Packer. +- `template_dir` - The directory to the template for the build. +- `timestamp` - The current Unix timestamp in UTC. +- `uuid` - Returns a random UUID. +- `upper` - Uppercases the string. 
### isotime Format -Formatting for the function `isotime` uses the magic reference date -**Mon Jan 2 15:04:05 -0700 MST 2006**, which breaks down to the following: +Formatting for the function `isotime` uses the magic reference date **Mon Jan 2 +15:04:05 -0700 MST 2006**, which breaks down to the following:
    + - - - - - - - - - + + + + + + + + + - - - - - - - - - + + + + + + + + + - - - - - - - - - + + + + + + + + +
    Day of WeekMonthDateHourMinuteSecondYearTimezone + +Day of Week + +Month + +Date + +Hour + +Minute + +Second + +Year + +Timezone +
    Numeric-010203 (15)040506-0700 +Numeric + +- + +01 + +02 + +03 (15) + +04 + +05 + +06 + +-0700 +
    TextualMonday (Mon)January (Jan)-----MST +Textual + +Monday (Mon) + +January (Jan) + +- + +- + +- + +- + +- + +MST +
    +
- _The values in parentheses are the abbreviated, or 24-hour clock values_ +*The values in parentheses are the abbreviated, or 24-hour clock values* - Here are some example formated time, using the above format options: +Here are some example formatted times, using the above format options: -```liquid +``` {.liquid} isotime = June 7, 7:22:43pm 2014 {{isotime "2006-01-02"}} = 2014-06-07 @@ -126,7 +184,7 @@ isotime = June 7, 7:22:43pm 2014 Please note that double quote characters need escaping inside of templates: -```javascript +``` {.javascript} { "builders": [ { @@ -147,6 +205,6 @@ Please note that double quote characters need escaping inside of templates: Specific to Amazon builders: -* ``clean_ami_name`` - AMI names can only contain certain characters. This +- `clean_ami_name` - AMI names can only contain certain characters. This function will replace illegal characters with a '-" character. Example usage since ":" is not a legal AMI name is: `{{isotime | clean_ami_name}}`. diff --git a/website/source/docs/templates/introduction.html.markdown b/website/source/docs/templates/introduction.html.markdown index 3dc363916..1d67ea196 100644 --- a/website/source/docs/templates/introduction.html.markdown +++ b/website/source/docs/templates/introduction.html.markdown @@ -1,21 +1,25 @@ --- -layout: "docs" -page_title: "Templates" -description: |- -  Templates are JSON files that configure the various components of Packer in order to create one or more machine images. Templates are portable, static, and readable and writable by both humans and computers. This has the added benefit of being able to not only create and modify templates by hand, but also write scripts to dynamically create or modify templates. ---- +description: | +  Templates are JSON files that configure the various components of Packer in +  order to create one or more machine images. Templates are portable, static, and +  readable and writable by both humans and computers. 
This has the added benefit + of being able to not only create and modify templates by hand, but also write + scripts to dynamically create or modify templates. +layout: docs +page_title: Templates +... # Templates -Templates are JSON files that configure the various components of Packer -in order to create one or more machine images. Templates are portable, static, -and readable and writable by both humans and computers. This has the added -benefit of being able to not only create and modify templates by hand, but -also write scripts to dynamically create or modify templates. +Templates are JSON files that configure the various components of Packer in +order to create one or more machine images. Templates are portable, static, and +readable and writable by both humans and computers. This has the added benefit +of being able to not only create and modify templates by hand, but also write +scripts to dynamically create or modify templates. -Templates are given to commands such as `packer build`, which will -take the template and actually run the builds within it, producing -any resulting machine images. +Templates are given to commands such as `packer build`, which will take the +template and actually run the builds within it, producing any resulting machine +images. ## Template Structure @@ -23,64 +27,64 @@ A template is a JSON object that has a set of keys configuring various components of Packer. The available keys within a template are listed below. Along with each key, it is noted whether it is required or not. -* `builders` (_required_) is an array of one or more objects that defines - the builders that will be used to create machine images for this template, - and configures each of those builders. For more information on how to define - and configure a builder, read the sub-section on - [configuring builders in templates](/docs/templates/builders.html). 
+- `builders` (*required*) is an array of one or more objects that defines the + builders that will be used to create machine images for this template, and + configures each of those builders. For more information on how to define and + configure a builder, read the sub-section on [configuring builders in + templates](/docs/templates/builders.html). -* `description` (optional) is a string providing a description of what - the template does. This output is used only in the - [inspect command](/docs/command-line/inspect.html). +- `description` (optional) is a string providing a description of what the + template does. This output is used only in the [inspect + command](/docs/command-line/inspect.html). -* `min_packer_version` (optional) is a string that has a minimum Packer - version that is required to parse the template. This can be used to - ensure that proper versions of Packer are used with the template. A - max version can't be specified because Packer retains backwards - compatibility with `packer fix`. +- `min_packer_version` (optional) is a string that has a minimum Packer version + that is required to parse the template. This can be used to ensure that proper + versions of Packer are used with the template. A max version can't be + specified because Packer retains backwards compatibility with `packer fix`. -* `post-processors` (optional) is an array of one or more objects that defines the - various post-processing steps to take with the built images. If not specified, - then no post-processing will be done. For more - information on what post-processors do and how they're defined, read the - sub-section on [configuring post-processors in templates](/docs/templates/post-processors.html). +- `post-processors` (optional) is an array of one or more objects that defines + the various post-processing steps to take with the built images. If not + specified, then no post-processing will be done. 
For more information on what + post-processors do and how they're defined, read the sub-section on + [configuring post-processors in + templates](/docs/templates/post-processors.html). -* `provisioners` (optional) is an array of one or more objects that defines - the provisioners that will be used to install and configure software for - the machines created by each of the builders. If it is not specified, - then no provisioners will be run. For more - information on how to define and configure a provisioner, read the - sub-section on [configuring provisioners in templates](/docs/templates/provisioners.html). +- `provisioners` (optional) is an array of one or more objects that defines the + provisioners that will be used to install and configure software for the + machines created by each of the builders. If it is not specified, then no + provisioners will be run. For more information on how to define and configure + a provisioner, read the sub-section on [configuring provisioners in + templates](/docs/templates/provisioners.html). -* `variables` (optional) is an array of one or more key/value strings that defines - user variables contained in the template. - If it is not specified, then no variables are defined. - For more information on how to define and use user variables, read the - sub-section on [user variables in templates](/docs/templates/user-variables.html). +- `variables` (optional) is an array of one or more key/value strings that + defines user variables contained in the template. If it is not specified, then + no variables are defined. For more information on how to define and use user + variables, read the sub-section on [user variables in + templates](/docs/templates/user-variables.html). ## Comments JSON doesn't support comments and Packer reports unknown keys as validation -errors. If you'd like to comment your template, you can prefix a _root level_ +errors. 
If you'd like to comment your template, you can prefix a *root level* key with an underscore. Example: -```javascript +``` {.javascript} { "_comment": "This is a comment", "builders": [{}] } ``` -**Important:** Only _root level_ keys can be underscore prefixed. Keys within +**Important:** Only *root level* keys can be underscore prefixed. Keys within builders, provisioners, etc. will still result in validation errors. ## Example Template -Below is an example of a basic template that is nearly fully functional. It is just -missing valid AWS access keys. Otherwise, it would work properly with +Below is an example of a basic template that is nearly fully functional. It is +just missing valid AWS access keys. Otherwise, it would work properly with `packer build`. -```javascript +``` {.javascript} { "builders": [ { diff --git a/website/source/docs/templates/post-processors.html.markdown b/website/source/docs/templates/post-processors.html.markdown index 7a7ba4664..2c71e6664 100644 --- a/website/source/docs/templates/post-processors.html.markdown +++ b/website/source/docs/templates/post-processors.html.markdown @@ -1,27 +1,30 @@ --- -layout: "docs" -page_title: "Templates: Post-Processors" -description: |- - The post-processor section within a template configures any post-processing that will be done to images built by the builders. Examples of post-processing would be compressing files, uploading artifacts, etc. ---- +description: | + The post-processor section within a template configures any post-processing that + will be done to images built by the builders. Examples of post-processing would + be compressing files, uploading artifacts, etc. +layout: docs +page_title: 'Templates: Post-Processors' +... # Templates: Post-Processors -The post-processor section within a template configures any post-processing -that will be done to images built by the builders. Examples of post-processing -would be compressing files, uploading artifacts, etc. 
+The post-processor section within a template configures any post-processing that +will be done to images built by the builders. Examples of post-processing would +be compressing files, uploading artifacts, etc. -Post-processors are _optional_. If no post-processors are defined within a template, -then no post-processing will be done to the image. The resulting artifact of -a build is just the image outputted by the builder. +Post-processors are *optional*. If no post-processors are defined within a +template, then no post-processing will be done to the image. The resulting +artifact of a build is just the image outputted by the builder. This documentation page will cover how to configure a post-processor in a template. The specific configuration options available for each post-processor, -however, must be referenced from the documentation for that specific post-processor. +however, must be referenced from the documentation for that specific +post-processor. Within a template, a section of post-processor definitions looks like this: -```javascript +``` {.javascript} { "post-processors": [ // ... one or more post-processor definitions here @@ -38,29 +41,29 @@ apply to, if you wish. ## Post-Processor Definition -Within the `post-processors` array in a template, there are three ways to -define a post-processor. There are _simple_ definitions, _detailed_ definitions, -and _sequence_ definitions. Don't worry, they're all very easy to understand, -and the "simple" and "detailed" definitions are simply shortcuts for the -"sequence" definition. +Within the `post-processors` array in a template, there are three ways to define +a post-processor. There are *simple* definitions, *detailed* definitions, and +*sequence* definitions. Don't worry, they're all very easy to understand, and +the "simple" and "detailed" definitions are simply shortcuts for the "sequence" +definition. A **simple definition** is just a string; the name of the post-processor. An -example is shown below. 
Simple definitions are used when no additional configuration -is needed for the post-processor. +example is shown below. Simple definitions are used when no additional +configuration is needed for the post-processor. -```javascript +``` {.javascript} { "post-processors": ["compress"] } ``` -A **detailed definition** is a JSON object. It is very similar to a builder -or provisioner definition. It contains a `type` field to denote the type of -the post-processor, but may also contain additional configuration for the -post-processor. A detailed definition is used when additional configuration -is needed beyond simply the type for the post-processor. An example is shown below. +A **detailed definition** is a JSON object. It is very similar to a builder or +provisioner definition. It contains a `type` field to denote the type of the +post-processor, but may also contain additional configuration for the +post-processor. A detailed definition is used when additional configuration is +needed beyond simply the type for the post-processor. An example is shown below. -```javascript +``` {.javascript} { "post-processors": [ { @@ -72,14 +75,14 @@ is needed beyond simply the type for the post-processor. An example is shown bel ``` A **sequence definition** is a JSON array comprised of other **simple** or -**detailed** definitions. The post-processors defined in the array are run -in order, with the artifact of each feeding into the next, and any intermediary +**detailed** definitions. The post-processors defined in the array are run in +order, with the artifact of each feeding into the next, and any intermediary artifacts being discarded. A sequence definition may not contain another sequence definition. Sequence definitions are used to chain together multiple post-processors. An example is shown below, where the artifact of a build is compressed then uploaded, but the compressed result is not kept. 
-```javascript +``` {.javascript} { "post-processors": [ [ @@ -90,21 +93,21 @@ compressed then uploaded, but the compressed result is not kept. } ``` -As you may be able to imagine, the **simple** and **detailed** definitions -are simply shortcuts for a **sequence** definition of only one element. +As you may be able to imagine, the **simple** and **detailed** definitions are +simply shortcuts for a **sequence** definition of only one element. ## Input Artifacts -When using post-processors, the input artifact (coming from a builder or -another post-processor) is discarded by default after the post-processor runs. -This is because generally, you don't want the intermediary artifacts on the -way to the final artifact created. +When using post-processors, the input artifact (coming from a builder or another +post-processor) is discarded by default after the post-processor runs. This is +because generally, you don't want the intermediary artifacts on the way to the +final artifact created. -In some cases, however, you may want to keep the intermediary artifacts. -You can tell Packer to keep these artifacts by setting the -`keep_input_artifact` configuration to `true`. An example is shown below: +In some cases, however, you may want to keep the intermediary artifacts. You can +tell Packer to keep these artifacts by setting the `keep_input_artifact` +configuration to `true`. An example is shown below: -```javascript +``` {.javascript} { "post-processors": [ { @@ -115,39 +118,37 @@ You can tell Packer to keep these artifacts by setting the } ``` -This setting will only keep the input artifact to _that specific_ -post-processor. If you're specifying a sequence of post-processors, then -all intermediaries are discarded by default except for the input artifacts -to post-processors that explicitly state to keep the input artifact. +This setting will only keep the input artifact to *that specific* +post-processor. 
If you're specifying a sequence of post-processors, then all +intermediaries are discarded by default except for the input artifacts to +post-processors that explicitly state to keep the input artifact. --> **Note:** The intuitive reader may be wondering what happens -if multiple post-processors are specified (not in a sequence). Does Packer require the -configuration to keep the input artifact on all the post-processors? -The answer is no, of course not. Packer is smart enough to figure out -that at least one post-processor requested that the input be kept, so it will keep -it around. +-> **Note:** The intuitive reader may be wondering what happens if multiple +post-processors are specified (not in a sequence). Does Packer require the +configuration to keep the input artifact on all the post-processors? The answer +is no, of course not. Packer is smart enough to figure out that at least one +post-processor requested that the input be kept, so it will keep it around. ## Run on Specific Builds -You can use the `only` or `except` configurations to run a post-processor -only with specific builds. These two configurations do what you expect: -`only` will only run the post-processor on the specified builds and -`except` will run the post-processor on anything other than the specified -builds. +You can use the `only` or `except` configurations to run a post-processor only +with specific builds. These two configurations do what you expect: `only` will +only run the post-processor on the specified builds and `except` will run the +post-processor on anything other than the specified builds. -An example of `only` being used is shown below, but the usage of `except` -is effectively the same. `only` and `except` can only be specified on "detailed" -configurations. If you have a sequence of post-processors to run, `only` -and `except` will only affect that single post-processor in the sequence. 
+An example of `only` being used is shown below, but the usage of `except` is +effectively the same. `only` and `except` can only be specified on "detailed" +configurations. If you have a sequence of post-processors to run, `only` and +`except` will only affect that single post-processor in the sequence. -```javascript +``` {.javascript} { "type": "vagrant", "only": ["virtualbox-iso"] } ``` -The values within `only` or `except` are _build names_, not builder -types. If you recall, build names by default are just their builder type, -but if you specify a custom `name` parameter, then you should use that -as the value instead of the type. +The values within `only` or `except` are *build names*, not builder types. If +you recall, build names by default are just their builder type, but if you +specify a custom `name` parameter, then you should use that as the value instead +of the type. diff --git a/website/source/docs/templates/provisioners.html.markdown b/website/source/docs/templates/provisioners.html.markdown index 00578bb86..9f4acc9cb 100644 --- a/website/source/docs/templates/provisioners.html.markdown +++ b/website/source/docs/templates/provisioners.html.markdown @@ -1,9 +1,11 @@ --- -layout: "docs" -page_title: "Templates: Provisioners" -description: |- - Within the template, the provisioners section contains an array of all the provisioners that Packer should use to install and configure software within running machines prior to turning them into machine images. ---- +description: | + Within the template, the provisioners section contains an array of all the + provisioners that Packer should use to install and configure software within + running machines prior to turning them into machine images. +layout: docs +page_title: 'Templates: Provisioners' +... 
# Templates: Provisioners @@ -11,19 +13,18 @@ Within the template, the provisioners section contains an array of all the provisioners that Packer should use to install and configure software within running machines prior to turning them into machine images. -Provisioners are _optional_. If no provisioners are defined within a template, -then no software other than the defaults will be installed within the -resulting machine images. This is not typical, however, since much of the -value of Packer is to produce multiple identical images -of pre-configured software. +Provisioners are *optional*. If no provisioners are defined within a template, +then no software other than the defaults will be installed within the resulting +machine images. This is not typical, however, since much of the value of Packer +is to produce multiple identical images of pre-configured software. This documentation page will cover how to configure a provisioner in a template. -The specific configuration options available for each provisioner, however, -must be referenced from the documentation for that specific provisioner. +The specific configuration options available for each provisioner, however, must +be referenced from the documentation for that specific provisioner. Within a template, a section of provisioner definitions looks like this: -```javascript +``` {.javascript} { "provisioners": [ // ... one or more provisioner definitions here @@ -31,25 +32,24 @@ Within a template, a section of provisioner definitions looks like this: } ``` -For each of the definitions, Packer will run the provisioner for each -of the configured builds. The provisioners will be run in the order -they are defined within the template. +For each of the definitions, Packer will run the provisioner for each of the +configured builds. The provisioners will be run in the order they are defined +within the template. 
## Provisioner Definition -A provisioner definition is a JSON object that must contain at least -the `type` key. This key specifies the name of the provisioner to use. -Additional keys within the object are used to configure the provisioner, -with the exception of a handful of special keys, covered later. +A provisioner definition is a JSON object that must contain at least the `type` +key. This key specifies the name of the provisioner to use. Additional keys +within the object are used to configure the provisioner, with the exception of a +handful of special keys, covered later. -As an example, the "shell" provisioner requires a key such as `script` -which specifies a path to a shell script to execute within the machines -being created. +As an example, the "shell" provisioner requires a key such as `script` which +specifies a path to a shell script to execute within the machines being created. An example provisioner definition is shown below, configuring the shell provisioner to run a local script within the machines: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh" @@ -58,16 +58,15 @@ provisioner to run a local script within the machines: ## Run on Specific Builds -You can use the `only` or `except` configurations to run a provisioner -only with specific builds. These two configurations do what you expect: -`only` will only run the provisioner on the specified builds and -`except` will run the provisioner on anything other than the specified -builds. +You can use the `only` or `except` configurations to run a provisioner only with +specific builds. These two configurations do what you expect: `only` will only +run the provisioner on the specified builds and `except` will run the +provisioner on anything other than the specified builds. 
-An example of `only` being used is shown below, but the usage of `except` -is effectively the same: +An example of `only` being used is shown below, but the usage of `except` is +effectively the same: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh", @@ -75,21 +74,21 @@ is effectively the same: } ``` -The values within `only` or `except` are _build names_, not builder -types. If you recall, build names by default are just their builder type, -but if you specify a custom `name` parameter, then you should use that -as the value instead of the type. +The values within `only` or `except` are *build names*, not builder types. If +you recall, build names by default are just their builder type, but if you +specify a custom `name` parameter, then you should use that as the value instead +of the type. ## Build-Specific Overrides -While the goal of Packer is to produce identical machine images, it -sometimes requires periods of time where the machines are different before -they eventually converge to be identical. In these cases, different configurations -for provisioners may be necessary depending on the build. This can be done -using build-specific overrides. +While the goal of Packer is to produce identical machine images, it sometimes +requires periods of time where the machines are different before they eventually +converge to be identical. In these cases, different configurations for +provisioners may be necessary depending on the build. This can be done using +build-specific overrides. -An example of where this might be necessary is when building both an EC2 AMI -and a VMware machine. The source EC2 AMI may setup a user with administrative +An example of where this might be necessary is when building both an EC2 AMI and +a VMware machine. The source EC2 AMI may setup a user with administrative privileges by default, whereas the VMware machine doesn't have these privileges. In this case, the shell script may need to be executed differently. 
Of course, the goal is that hopefully the shell script converges these two images to be @@ -97,7 +96,7 @@ identical. However, they may initially need to be run differently. This example is shown below: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh", @@ -111,24 +110,23 @@ This example is shown below: ``` As you can see, the `override` key is used. The value of this key is another -JSON object where the key is the name of a [builder definition](/docs/templates/builders.html). -The value of this is in turn another JSON object. This JSON object simply -contains the provisioner configuration as normal. This configuration is merged -into the default provisioner configuration. +JSON object where the key is the name of a [builder +definition](/docs/templates/builders.html). The value of this is in turn another +JSON object. This JSON object simply contains the provisioner configuration as +normal. This configuration is merged into the default provisioner configuration. ## Pausing Before Running -With certain provisioners it is sometimes desirable to pause for some period -of time before running it. Specifically, in cases where a provisioner reboots -the machine, you may want to wait for some period of time before starting -the next provisioner. +With certain provisioners it is sometimes desirable to pause for some period of +time before running it. Specifically, in cases where a provisioner reboots the +machine, you may want to wait for some period of time before starting the next +provisioner. Every provisioner definition in a Packer template can take a special -configuration `pause_before` that is the amount of time to pause before -running that provisioner. By default, there is no pause. An example -is shown below: +configuration `pause_before` that is the amount of time to pause before running +that provisioner. By default, there is no pause. 
An example is shown below: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh", @@ -136,5 +134,5 @@ is shown below: } ``` -For the above provisioner, Packer will wait 10 seconds before uploading -and executing the shell script. +For the above provisioner, Packer will wait 10 seconds before uploading and +executing the shell script. diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index 568b45ec1..3ca2c2de2 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -1,19 +1,19 @@ --- -layout: "docs" -page_title: "Templates: Push" -description: |- - Within the template, the push section configures how a template can be - pushed to a remote build service. ---- +description: | + Within the template, the push section configures how a template can be pushed to + a remote build service. +layout: docs +page_title: 'Templates: Push' +... # Templates: Push Within the template, the push section configures how a template can be [pushed](/docs/command-line/push.html) to a remote build service. -Push configuration is responsible for defining what files are required -to build this template, what the name of build configuration is in the -build service, etc. +Push configuration is responsible for defining what files are required to build +this template, what the name of build configuration is in the build service, +etc. The only build service that Packer can currently push to is [Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build @@ -21,7 +21,7 @@ services will come in the form of plugins in the future. Within a template, a push configuration section looks like this: -```javascript +``` {.javascript} { "push": { // ... push configuration here @@ -37,37 +37,37 @@ each category, the available configuration keys are alphabetized. ### Required -* `name` (string) - Name of the build configuration in the build service. 
- If this doesn't exist, it will be created (by default). +- `name` (string) - Name of the build configuration in the build service. If + this doesn't exist, it will be created (by default). ### Optional -* `address` (string) - The address of the build service to use. By default - this is `https://atlas.hashicorp.com`. +- `address` (string) - The address of the build service to use. By default this + is `https://atlas.hashicorp.com`. -* `base_dir` (string) - The base directory of the files to upload. This - will be the current working directory when the build service executes your - template. This path is relative to the template. +- `base_dir` (string) - The base directory of the files to upload. This will be + the current working directory when the build service executes your template. + This path is relative to the template. -* `include` (array of strings) - Glob patterns to include relative to - the `base_dir`. If this is specified, only files that match the include - pattern are included. +- `include` (array of strings) - Glob patterns to include relative to the + `base_dir`. If this is specified, only files that match the include pattern + are included. -* `exclude` (array of strings) - Glob patterns to exclude relative to - the `base_dir`. +- `exclude` (array of strings) - Glob patterns to exclude relative to the + `base_dir`. -* `token` (string) - An access token to use to authenticate to the build - service. +- `token` (string) - An access token to use to authenticate to the + build service. -* `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) - and only upload the files that are tracked by the VCS. This is useful - for automatically excluding ignored files. This defaults to false. +- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and + only upload the files that are tracked by the VCS. This is useful for + automatically excluding ignored files. This defaults to false. 
## Examples A push configuration section with minimal options: -```javascript +``` {.javascript} { "push": { "name": "hashicorp/precise64" @@ -78,7 +78,7 @@ A push configuration section with minimal options: A push configuration specifying Packer to inspect the VCS and list individual files to include: -```javascript +``` {.javascript} { "push": { "name": "hashicorp/precise64", diff --git a/website/source/docs/templates/user-variables.html.markdown b/website/source/docs/templates/user-variables.html.markdown index d80662dea..30c9555bf 100644 --- a/website/source/docs/templates/user-variables.html.markdown +++ b/website/source/docs/templates/user-variables.html.markdown @@ -1,35 +1,38 @@ --- -layout: "docs" -page_title: "User Variables in Templates" -description: |- - User variables allow your templates to be further configured with variables from the command-line, environmental variables, or files. This lets you parameterize your templates so that you can keep secret tokens, environment-specific data, and other types of information out of your templates. This maximizes the portability and shareability of the template. ---- +description: | + User variables allow your templates to be further configured with variables from + the command-line, environmental variables, or files. This lets you parameterize + your templates so that you can keep secret tokens, environment-specific data, + and other types of information out of your templates. This maximizes the + portability and shareability of the template. +layout: docs +page_title: User Variables in Templates +... # User Variables -User variables allow your templates to be further configured with variables -from the command-line, environmental variables, or files. This lets you -parameterize your templates so that you can keep secret tokens, -environment-specific data, and other types of information out of your -templates. This maximizes the portability and shareability of the template. 
+User variables allow your templates to be further configured with variables from +the command-line, environmental variables, or files. This lets you parameterize +your templates so that you can keep secret tokens, environment-specific data, +and other types of information out of your templates. This maximizes the +portability and shareability of the template. -Using user variables expects you know how -[configuration templates](/docs/templates/configuration-templates.html) work. -If you don't know how configuration templates work yet, please read that -page first. +Using user variables expects you know how [configuration +templates](/docs/templates/configuration-templates.html) work. If you don't know +how configuration templates work yet, please read that page first. ## Usage User variables must first be defined in a `variables` section within your -template. Even if you want a variable to default to an empty string, it -must be defined. This explicitness makes it easy for newcomers to your -template to understand what can be modified using variables in your template. +template. Even if you want a variable to default to an empty string, it must be +defined. This explicitness makes it easy for newcomers to your template to +understand what can be modified using variables in your template. -The `variables` section is a simple key/value mapping of the variable -name to a default value. A default value can be the empty string. An -example is shown below: +The `variables` section is a simple key/value mapping of the variable name to a +default value. A default value can be the empty string. An example is shown +below: -```javascript +``` {.javascript} { "variables": { "aws_access_key": "", @@ -46,28 +49,27 @@ example is shown below: ``` In the above example, the template defines two variables: `aws_access_key` and -`aws_secret_key`. They default to empty values. 
-Later, the variables are used within the builder we defined in order to -configure the actual keys for the Amazon builder. +`aws_secret_key`. They default to empty values. Later, the variables are used +within the builder we defined in order to configure the actual keys for the +Amazon builder. -If the default value is `null`, then the user variable will be _required_. -This means that the user must specify a value for this variable or template +If the default value is `null`, then the user variable will be *required*. This +means that the user must specify a value for this variable or template validation will fail. -Using the variables is extremely easy. Variables are used by calling -the user function in the form of {{user `variable`}}. -This function can be used in _any value_ within the template, in -builders, provisioners, _anything_. The user variable is available globally -within the template. +Using the variables is extremely easy. Variables are used by calling the user +function in the form of {{user \`variable\`}}. This function can be +used in *any value* within the template, in builders, provisioners, *anything*. +The user variable is available globally within the template. ## Environmental Variables -Environmental variables can be used within your template using user -variables. The `env` function is available _only_ within the default value -of a user variable, allowing you to default a user variable to an -environmental variable. An example is shown below: +Environmental variables can be used within your template using user variables. +The `env` function is available *only* within the default value of a user +variable, allowing you to default a user variable to an environmental variable. +An example is shown below: -```javascript +``` {.javascript} { "variables": { "my_secret": "{{env `MY_SECRET`}}", @@ -77,73 +79,69 @@ environmental variable. 
An example is shown below: } ``` -This will default "my\_secret" to be the value of the "MY\_SECRET" -environmental variable (or the empty string if it does not exist). +This will default "my\_secret" to be the value of the "MY\_SECRET" environmental +variable (or the empty string if it does not exist). --> **Why can't I use environmental variables elsewhere?** -User variables are the single source of configurable input to a template. -We felt that having environmental variables used _anywhere_ in a -template would confuse the user about the possible inputs to a template. -By allowing environmental variables only within default values for user -variables, user variables remain as the single source of input to a template -that a user can easily discover using `packer inspect`. +-> **Why can't I use environmental variables elsewhere?** User variables are +the single source of configurable input to a template. We felt that having +environmental variables used *anywhere* in a template would confuse the user +about the possible inputs to a template. By allowing environmental variables +only within default values for user variables, user variables remain as the +single source of input to a template that a user can easily discover using +`packer inspect`. ## Setting Variables -Now that we covered how to define and use variables within a template, -the next important point is how to actually set these variables. Packer -exposes two methods for setting variables: from the command line or -from a file. +Now that we covered how to define and use variables within a template, the next +important point is how to actually set these variables. Packer exposes two +methods for setting variables: from the command line or from a file. ### From the Command Line -To set variables from the command line, the `-var` flag is used as -a parameter to `packer build` (and some other commands). Continuing our example -above, we could build our template using the command below. 
The command -is split across multiple lines for readability, but can of course be a single -line. +To set variables from the command line, the `-var` flag is used as a parameter +to `packer build` (and some other commands). Continuing our example above, we +could build our template using the command below. The command is split across +multiple lines for readability, but can of course be a single line. -```text +``` {.text} $ packer build \ -var 'aws_access_key=foo' \ -var 'aws_secret_key=bar' \ template.json ``` -As you can see, the `-var` flag can be specified multiple times in order -to set multiple variables. Also, variables set later on the command-line -override earlier set variables if it has already been set. +As you can see, the `-var` flag can be specified multiple times in order to set +multiple variables. Also, variables set later on the command-line override +earlier set variables if it has already been set. -Finally, variables set from the command-line override all other methods -of setting variables. So if you specify a variable in a file (the next -method shown), you can override it using the command-line. +Finally, variables set from the command-line override all other methods of +setting variables. So if you specify a variable in a file (the next method +shown), you can override it using the command-line. ### From a File -Variables can also be set from an external JSON file. The `-var-file` -flag reads a file containing a basic key/value mapping of variables to -values and sets those variables. The JSON file is simple: +Variables can also be set from an external JSON file. The `-var-file` flag reads +a file containing a basic key/value mapping of variables to values and sets +those variables. The JSON file is simple: -```javascript +``` {.javascript} { "aws_access_key": "foo", "aws_secret_key": "bar" } ``` -It is a single JSON object where the keys are variables and the values are -the variable values. 
Assuming this file is in `variables.json`, we can -build our template using the following command: +It is a single JSON object where the keys are variables and the values are the +variable values. Assuming this file is in `variables.json`, we can build our +template using the following command: -```text +``` {.text} $ packer build -var-file=variables.json template.json ``` -The `-var-file` flag can be specified multiple times and variables from -multiple files will be read and applied. As you'd expect, variables read -from files specified later override a variable set earlier if it has -already been set. +The `-var-file` flag can be specified multiple times and variables from multiple +files will be read and applied. As you'd expect, variables read from files +specified later override a variable set earlier if it has already been set. -And as mentioned above, no matter where a `-var-file` is specified, a -`-var` flag on the command line will always override any variables from -a file. +And as mentioned above, no matter where a `-var-file` is specified, a `-var` +flag on the command line will always override any variables from a file. diff --git a/website/source/docs/templates/veewee-to-packer.html.markdown b/website/source/docs/templates/veewee-to-packer.html.markdown index 81a06de71..ecc257f14 100644 --- a/website/source/docs/templates/veewee-to-packer.html.markdown +++ b/website/source/docs/templates/veewee-to-packer.html.markdown @@ -1,35 +1,39 @@ --- -layout: "docs" -page_title: "Convert Veewee Definitions to Packer Templates" -description: |- - If you are or were a user of Veewee, then there is an official tool called veewee-to-packer that will convert your Veewee definition into an equivalent Packer template. Even if you're not a Veewee user, Veewee has a large library of templates that can be readily used with Packer by simply converting them. 
---- +description: | + If you are or were a user of Veewee, then there is an official tool called + veewee-to-packer that will convert your Veewee definition into an equivalent + Packer template. Even if you're not a Veewee user, Veewee has a large library of + templates that can be readily used with Packer by simply converting them. +layout: docs +page_title: Convert Veewee Definitions to Packer Templates +... # Veewee-to-Packer -If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), -then there is an official tool called [veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) -that will convert your Veewee definition into an equivalent Packer template. -Even if you're not a Veewee user, Veewee has a -[large library](https://github.com/jedi4ever/veewee/tree/master/templates) -of templates that can be readily used with Packer by simply converting them. +If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), then +there is an official tool called +[veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) that will +convert your Veewee definition into an equivalent Packer template. Even if +you're not a Veewee user, Veewee has a [large +library](https://github.com/jedi4ever/veewee/tree/master/templates) of templates +that can be readily used with Packer by simply converting them. ## Installation and Usage Since Veewee itself is a Ruby project, so too is the veewee-to-packer -application so that it can read the Veewee configurations. Install it using RubyGems: +application so that it can read the Veewee configurations. Install it using +RubyGems: -```text +``` {.text} $ gem install veewee-to-packer ... ``` -Once installed, usage is easy! Just point `veewee-to-packer` -at the `definition.rb` file of any template. The converter will output -any warnings or messages about the conversion. The example below converts -a CentOS template: +Once installed, usage is easy! 
Just point `veewee-to-packer` at the +`definition.rb` file of any template. The converter will output any warnings or +messages about the conversion. The example below converts a CentOS template: -```text +``` {.text} $ veewee-to-packer templates/CentOS-6.4/definition.rb Success! Your Veewee definition was converted to a Packer template! The template can be found in the `template.json` file @@ -41,22 +45,21 @@ first, since the template has relative paths that expect you to use it from the same working directory. ``` -***Voila!*** By default, `veewee-to-packer` will output a template that -contains a builder for both VirtualBox and VMware. You can use the -`-only` flag on `packer build` to only build one of them. Otherwise -you can use the `--builder` flag on `veewee-to-packer` to only output -specific builder configurations. +***Voila!*** By default, `veewee-to-packer` will output a template that contains +a builder for both VirtualBox and VMware. You can use the `-only` flag on +`packer build` to only build one of them. Otherwise you can use the `--builder` +flag on `veewee-to-packer` to only output specific builder configurations. ## Limitations -None, really. The tool will tell you if it can't convert a part of a -template, and whether that is a critical error or just a warning. -Most of Veewee's functions translate perfectly over to Packer. There are -still a couple missing features in Packer, but they're minimal. +None, really. The tool will tell you if it can't convert a part of a template, +and whether that is a critical error or just a warning. Most of Veewee's +functions translate perfectly over to Packer. There are still a couple missing +features in Packer, but they're minimal. ## Bugs -If you find any bugs, please report them to the -[veewee-to-packer issue tracker](https://github.com/mitchellh/veewee-to-packer). -I haven't been able to exhaustively test every Veewee template, so there -are certainly some edge cases out there. 
+If you find any bugs, please report them to the [veewee-to-packer issue +tracker](https://github.com/mitchellh/veewee-to-packer). I haven't been able to +exhaustively test every Veewee template, so there are certainly some edge cases +out there. diff --git a/website/source/intro/getting-started/build-image.html.markdown b/website/source/intro/getting-started/build-image.html.markdown index 4bf8eda57..ec1d851a9 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -1,29 +1,32 @@ --- -layout: "intro" -page_title: "Build an Image" -prev_url: "/intro/getting-started/setup.html" -next_url: "/intro/getting-started/provision.html" -next_title: "Provision" -description: |- - With Packer installed, let's just dive right into it and build our first image. Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just an example. Packer can create images for many platforms with anything pre-installed. ---- +description: | + With Packer installed, let's just dive right into it and build our first image. + Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just + an example. Packer can create images for many platforms with anything + pre-installed. +layout: intro +next_title: Provision +next_url: '/intro/getting-started/provision.html' +page_title: Build an Image +prev_url: '/intro/getting-started/setup.html' +... # Build an Image -With Packer installed, let's just dive right into it and build our first -image. Our first image will be an [Amazon EC2 AMI](http://aws.amazon.com/ec2/) -with Redis pre-installed. This is just an example. Packer can create images -for [many platforms](/intro/platforms.html) with anything pre-installed. +With Packer installed, let's just dive right into it and build our first image. +Our first image will be an [Amazon EC2 AMI](http://aws.amazon.com/ec2/) with +Redis pre-installed. This is just an example. 
Packer can create images for [many +platforms](/intro/platforms.html) with anything pre-installed. If you don't have an AWS account, [create one now](http://aws.amazon.com/free/). For the example, we'll use a "t2.micro" instance to build our image, which -qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning -it will be free. If you already have an AWS account, you may be charged some -amount of money, but it shouldn't be more than a few cents. +qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning it +will be free. If you already have an AWS account, you may be charged some amount +of money, but it shouldn't be more than a few cents. --> **Note:** If you're not using an account that qualifies under the AWS -free-tier, you may be charged to run these examples. The charge should only be -a few cents, but we're not responsible if it ends up being more. +-> **Note:** If you're not using an account that qualifies under the AWS +free-tier, you may be charged to run these examples. The charge should only be a +few cents, but we're not responsible if it ends up being more. Packer can build images for [many platforms](/intro/platforms.html) other than AWS, but AWS requires no additional software installed on your computer and @@ -34,16 +37,16 @@ apply to the other platforms as well. ## The Template -The configuration file used to define what image we want built and how -is called a _template_ in Packer terminology. The format of a template -is simple [JSON](http://www.json.org/). JSON struck the best balance between +The configuration file used to define what image we want built and how is called +a *template* in Packer terminology. The format of a template is simple +[JSON](http://www.json.org/). JSON struck the best balance between human-editable and machine-editable, allowing both hand-made templates as well as machine generated templates to easily be made. 
We'll start by creating the entire template, then we'll go over each section briefly. Create a file `example.json` and fill it with the following contents: -```javascript +``` {.javascript} { "variables": { "aws_access_key": "", @@ -62,55 +65,55 @@ briefly. Create a file `example.json` and fill it with the following contents: } ``` -When building, you'll pass in the `aws_access_key` and `aws_secret_key` as -a [user variable](/docs/templates/user-variables.html), keeping your secret -keys out of the template. You can create security credentials -on [this page](https://console.aws.amazon.com/iam/home?#security_credential). -An example IAM policy document can be found in the [Amazon EC2 builder docs](/docs/builders/amazon.html). +When building, you'll pass in the `aws_access_key` and `aws_secret_key` as a +[user variable](/docs/templates/user-variables.html), keeping your secret keys +out of the template. You can create security credentials on [this +page](https://console.aws.amazon.com/iam/home?#security_credential). An example +IAM policy document can be found in the [Amazon EC2 builder +docs](/docs/builders/amazon.html). -This is a basic template that is ready-to-go. It should be immediately recognizable -as a normal, basic JSON object. Within the object, the `builders` section -contains an array of JSON objects configuring a specific _builder_. A -builder is a component of Packer that is responsible for creating a machine -and turning that machine into an image. +This is a basic template that is ready-to-go. It should be immediately +recognizable as a normal, basic JSON object. Within the object, the `builders` +section contains an array of JSON objects configuring a specific *builder*. A +builder is a component of Packer that is responsible for creating a machine and +turning that machine into an image. -In this case, we're only configuring a single builder of type `amazon-ebs`. -This is the Amazon EC2 AMI builder that ships with Packer. 
This builder -builds an EBS-backed AMI by launching a source AMI, provisioning on top of -that, and re-packaging it into a new AMI. +In this case, we're only configuring a single builder of type `amazon-ebs`. This +is the Amazon EC2 AMI builder that ships with Packer. This builder builds an +EBS-backed AMI by launching a source AMI, provisioning on top of that, and +re-packaging it into a new AMI. -The additional keys within the object are configuration for this builder, specifying things -such as access keys, the source AMI to build from, and more. -The exact set of configuration variables available for a builder are -specific to each builder and can be found within the [documentation](/docs). +The additional keys within the object are configuration for this builder, +specifying things such as access keys, the source AMI to build from, and more. +The exact set of configuration variables available for a builder are specific to +each builder and can be found within the [documentation](/docs). -Before we take this template and build an image from it, let's validate the template -by running `packer validate example.json`. This command checks the syntax -as well as the configuration values to verify they look valid. The output should -look similar to below, because the template should be valid. If there are +Before we take this template and build an image from it, let's validate the +template by running `packer validate example.json`. This command checks the +syntax as well as the configuration values to verify they look valid. The output +should look similar to below, because the template should be valid. If there are any errors, this command will tell you. -```text +``` {.text} $ packer validate example.json Template validated successfully. ``` Next, let's build the image from this template. -An astute reader may notice that we said earlier we'd be building an -image with Redis pre-installed, and yet the template we made doesn't reference -Redis anywhere. 
In fact, this part of the documentation will only cover making
-a first basic, non-provisioned image. The next section on provisioning will
-cover installing Redis.
+An astute reader may notice that we said earlier we'd be building an image with
+Redis pre-installed, and yet the template we made doesn't reference Redis
+anywhere. In fact, this part of the documentation will only cover making a first
+basic, non-provisioned image. The next section on provisioning will cover
+installing Redis.
 
 ## Your First Image
 
-With a properly validated template. It is time to build your first image.
-This is done by calling `packer build` with the template file. The output
-should look similar to below. Note that this process typically takes a
-few minutes.
+With a properly validated template, it is time to build your first image. This
+is done by calling `packer build` with the template file. The output should look
+similar to below. Note that this process typically takes a few minutes.
 
-```text
+``` {.text}
 $ packer build \
     -var 'aws_access_key=YOUR ACCESS KEY' \
     -var 'aws_secret_key=YOUR SECRET KEY' \
@@ -139,38 +142,36 @@ $ packer build \
     us-east-1: ami-19601070
 ```
 
-At the end of running `packer build`, Packer outputs the _artifacts_
-that were created as part of the build. Artifacts are the results of a
-build, and typically represent an ID (such as in the case of an AMI) or
-a set of files (such as for a VMware virtual machine). In this example,
-we only have a single artifact: the AMI in us-east-1 that was created.
+At the end of running `packer build`, Packer outputs the *artifacts* that were
+created as part of the build. Artifacts are the results of a build, and
+typically represent an ID (such as in the case of an AMI) or a set of files
+(such as for a VMware virtual machine). In this example, we only have a single
+artifact: the AMI in us-east-1 that was created.
 
-This AMI is ready to use. 
If you wanted you can go and launch this AMI
-right now and it would work great.
+This AMI is ready to use. If you want, you can go and launch this AMI right now
+and it would work great.
 
--> **Note:** Your AMI ID will surely be different than the
-one above. If you try to launch the one in the example output above, you
-will get an error. If you want to try to launch your AMI, get the ID from
-the Packer output.
+-> **Note:** Your AMI ID will surely be different than the one above. If you
+try to launch the one in the example output above, you will get an error. If you
+want to try to launch your AMI, get the ID from the Packer output.
 
 ## Managing the Image
 
-Packer only builds images. It does not attempt to manage them in any way.
-After they're built, it is up to you to launch or destroy them as you see
-fit. If you want to store and namespace images for easy reference, you
-can use [Atlas by HashiCorp](https://atlas.hashicorp.com). We'll cover
-remotely building and storing images at the end of this getting started guide.
+Packer only builds images. It does not attempt to manage them in any way. After
+they're built, it is up to you to launch or destroy them as you see fit. If you
+want to store and namespace images for easy reference, you can use [Atlas by
+HashiCorp](https://atlas.hashicorp.com). We'll cover remotely building and
+storing images at the end of this getting started guide.
 
-After running the above example, your AWS account
-now has an AMI associated with it. AMIs are stored in S3 by Amazon,
-so unless you want to be charged about $0.01
-per month, you'll probably want to remove it. Remove the AMI by
-first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images).
-Next, delete the associated snapshot on the
-[AWS snapshot management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Snapshots). 
+After running the above example, your AWS account now has an AMI associated with +it. AMIs are stored in S3 by Amazon, so unless you want to be charged about +\$0.01 per month, you'll probably want to remove it. Remove the AMI by first +deregistering it on the [AWS AMI management +page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). Next, +delete the associated snapshot on the [AWS snapshot management +page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Snapshots). -Congratulations! You've just built your first image with Packer. Although -the image was pretty useless in this case (nothing was changed about it), -this page should've given you a general idea of how Packer works, what -templates are, and how to validate and build templates into machine -images. +Congratulations! You've just built your first image with Packer. Although the +image was pretty useless in this case (nothing was changed about it), this page +should've given you a general idea of how Packer works, what templates are, and +how to validate and build templates into machine images. diff --git a/website/source/intro/getting-started/next.html.markdown b/website/source/intro/getting-started/next.html.markdown index 262b84bb9..e1e7cc2ae 100644 --- a/website/source/intro/getting-started/next.html.markdown +++ b/website/source/intro/getting-started/next.html.markdown @@ -1,25 +1,29 @@ --- -layout: "intro" -page_title: "Next Steps" -description: |- - That concludes the getting started guide for Packer. You should now be comfortable with basic Packer usage, should understand templates, defining builds, provisioners, etc. At this point you're ready to begin playing with and using Packer in real scenarios. ---- +description: | + That concludes the getting started guide for Packer. You should now be + comfortable with basic Packer usage, should understand templates, defining + builds, provisioners, etc. 
At this point you're ready to begin playing with and + using Packer in real scenarios. +layout: intro +page_title: Next Steps +... # Next Steps -That concludes the getting started guide for Packer. You should now be comfortable -with basic Packer usage, should understand templates, defining builds, provisioners, -etc. At this point you're ready to begin playing with and using Packer -in real scenarios. +That concludes the getting started guide for Packer. You should now be +comfortable with basic Packer usage, should understand templates, defining +builds, provisioners, etc. At this point you're ready to begin playing with and +using Packer in real scenarios. -From this point forward, the most important reference for you will be -the [documentation](/docs). The documentation is less of a guide and -more of a reference of all the overall features and options of Packer. +From this point forward, the most important reference for you will be the +[documentation](/docs). The documentation is less of a guide and more of a +reference of all the overall features and options of Packer. -If you're interested in learning more about how Packer fits into the -HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/intro/getting-started). +If you're interested in learning more about how Packer fits into the HashiCorp +ecosystem of tools, read our [Atlas getting started +overview](https://atlas.hashicorp.com/help/intro/getting-started). -As you use Packer more, please voice your comments and concerns on -the [mailing list or IRC](/community). Additionally, Packer is -[open source](https://github.com/mitchellh/packer) so please contribute -if you'd like to. Contributions are very welcome. +As you use Packer more, please voice your comments and concerns on the [mailing +list or IRC](/community). Additionally, Packer is [open +source](https://github.com/mitchellh/packer) so please contribute if you'd like +to. 
Contributions are very welcome. diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 90554dacc..626033ef2 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -1,57 +1,59 @@ --- -layout: "intro" -page_title: "Parallel Builds" -prev_url: "/intro/getting-started/provision.html" -next_url: "/intro/getting-started/vagrant.html" -next_title: "Vagrant Boxes" -description: |- - So far we've shown how Packer can automatically build an image and provision it. This on its own is already quite powerful. But Packer can do better than that. Packer can create multiple images for multiple platforms in parallel, all configured from a single template. ---- +description: | + So far we've shown how Packer can automatically build an image and provision it. + This on its own is already quite powerful. But Packer can do better than that. + Packer can create multiple images for multiple platforms in parallel, all + configured from a single template. +layout: intro +next_title: Vagrant Boxes +next_url: '/intro/getting-started/vagrant.html' +page_title: Parallel Builds +prev_url: '/intro/getting-started/provision.html' +... # Parallel Builds So far we've shown how Packer can automatically build an image and provision it. This on its own is already quite powerful. But Packer can do better than that. -Packer can create multiple images for multiple platforms _in parallel_, all +Packer can create multiple images for multiple platforms *in parallel*, all configured from a single template. -This is a very useful and important feature of Packer. As an example, -Packer is able to make an AMI and a VMware virtual machine -in parallel provisioned with the _same scripts_, resulting in near-identical -images. The AMI can be used for production, the VMware machine can be used -for development. 
Or, another example, if you're using Packer to build -[software appliances](http://en.wikipedia.org/wiki/Software_appliance), -then you can build the appliance for every supported platform all in -parallel, all configured from a single template. +This is a very useful and important feature of Packer. As an example, Packer is +able to make an AMI and a VMware virtual machine in parallel provisioned with +the *same scripts*, resulting in near-identical images. The AMI can be used for +production, the VMware machine can be used for development. Or, another example, +if you're using Packer to build [software +appliances](http://en.wikipedia.org/wiki/Software_appliance), then you can build +the appliance for every supported platform all in parallel, all configured from +a single template. -Once you start taking advantage of this feature, the possibilities begin -to unfold in front of you. +Once you start taking advantage of this feature, the possibilities begin to +unfold in front of you. -Continuing on the example in this getting started guide, we'll build -a [DigitalOcean](http://www.digitalocean.com) image as well as an AMI. Both -will be near-identical: bare bones Ubuntu OS with Redis pre-installed. -However, since we're building for both platforms, you have the option of -whether you want to use the AMI, or the DigitalOcean snapshot. Or use both. +Continuing on the example in this getting started guide, we'll build a +[DigitalOcean](http://www.digitalocean.com) image as well as an AMI. Both will +be near-identical: bare bones Ubuntu OS with Redis pre-installed. However, since +we're building for both platforms, you have the option of whether you want to +use the AMI, or the DigitalOcean snapshot. Or use both. ## Setting Up DigitalOcean -[DigitalOcean](https://www.digitalocean.com/) is a relatively new, but -very popular VPS provider that has popped up. They have a quality offering -of high performance, low cost VPS servers. 
We'll be building a DigitalOcean -snapshot for this example. +[DigitalOcean](https://www.digitalocean.com/) is a relatively new, but very +popular VPS provider that has popped up. They have a quality offering of high +performance, low cost VPS servers. We'll be building a DigitalOcean snapshot for +this example. -In order to do this, you'll need an account with DigitalOcean. -[Sign up for an account now](https://www.digitalocean.com/). It is free -to sign up. Because the "droplets" (servers) are charged hourly, you -_will_ be charged $0.01 for every image you create with Packer. If -you're not okay with this, just follow along. +In order to do this, you'll need an account with DigitalOcean. [Sign up for an +account now](https://www.digitalocean.com/). It is free to sign up. Because the +"droplets" (servers) are charged hourly, you *will* be charged \$0.01 for every +image you create with Packer. If you're not okay with this, just follow along. -!> **Warning!** You _will_ be charged $0.01 by DigitalOcean per image +!> **Warning!** You *will* be charged \$0.01 by DigitalOcean per image created with Packer because of the time the "droplet" is running. -Once you sign up for an account, grab your API token from -the [DigitalOcean API access page](https://cloud.digitalocean.com/settings/applications). -Save these values somewhere; you'll need them in a second. +Once you sign up for an account, grab your API token from the [DigitalOcean API +access page](https://cloud.digitalocean.com/settings/applications). Save these +values somewhere; you'll need them in a second. ## Modifying the Template @@ -59,20 +61,20 @@ We now have to modify the template to add DigitalOcean to it. Modify the template we've been using and add the following JSON object to the `builders` array. 
-```javascript +``` {.javascript} { "type": "digitalocean", "api_token": "{{user `do_api_token`}}", - "image": "ubuntu-14-04-x64", - "region": "nyc3", - "size": "512mb", + "image": "ubuntu-14-04-x64", + "region": "nyc3", + "size": "512mb", } ``` -You'll also need to modify the `variables` section of the template -to include the access keys for DigitalOcean. +You'll also need to modify the `variables` section of the template to include +the access keys for DigitalOcean. -```javascript +``` {.javascript} "variables": { "do_api_token": "", // ... @@ -81,61 +83,61 @@ to include the access keys for DigitalOcean. The entire template should now look like this: -```javascript +``` {.javascript} { - "variables": { - "aws_access_key": "", - "aws_secret_key": "", - "do_api_token": "" - }, - "builders": [{ - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "us-east-1", - "source_ami": "ami-de0d9eb7", - "instance_type": "t1.micro", - "ssh_username": "ubuntu", - "ami_name": "packer-example {{timestamp}}" - },{ - "type": "digitalocean", - "api_token": "{{user `do_api_token`}}", - "image": "ubuntu-14-04-x64", - "region": "nyc3", - "size": "512mb" - }], - "provisioners": [{ - "type": "shell", - "inline": [ - "sleep 30", - "sudo apt-get update", - "sudo apt-get install -y redis-server" - ] - }] + "variables": { + "aws_access_key": "", + "aws_secret_key": "", + "do_api_token": "" + }, + "builders": [{ + "type": "amazon-ebs", + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "us-east-1", + "source_ami": "ami-de0d9eb7", + "instance_type": "t1.micro", + "ssh_username": "ubuntu", + "ami_name": "packer-example {{timestamp}}" + },{ + "type": "digitalocean", + "api_token": "{{user `do_api_token`}}", + "image": "ubuntu-14-04-x64", + "region": "nyc3", + "size": "512mb" + }], + "provisioners": [{ + "type": "shell", + "inline": [ + "sleep 30", + "sudo apt-get update", + 
"sudo apt-get install -y redis-server" + ] + }] } ``` Additional builders are simply added to the `builders` array in the template. -This tells Packer to build multiple images. The builder `type` values don't -even need to be different! In fact, if you wanted to build multiple AMIs, -you can do that as long as you specify a unique `name` for each build. +This tells Packer to build multiple images. The builder `type` values don't even +need to be different! In fact, if you wanted to build multiple AMIs, you can do +that as long as you specify a unique `name` for each build. Validate the template with `packer validate`. This is always a good practice. --> **Note:** If you're looking for more **DigitalOcean configuration options**, -you can find them on the -[DigitalOcean Builder page](/docs/builders/digitalocean.html) in the -documentation. The documentation is more of a reference manual that contains a -listing of all the available configuration options. +-> **Note:** If you're looking for more **DigitalOcean configuration +options**, you can find them on the [DigitalOcean Builder +page](/docs/builders/digitalocean.html) in the documentation. The documentation +is more of a reference manual that contains a listing of all the available +configuration options. ## Build -Now run `packer build` with your user variables. The output is too verbose to include -all of it, but a portion of it is reproduced below. Note that the ordering -and wording of the lines may be slightly different, but the effect is the -same. +Now run `packer build` with your user variables. The output is too verbose to +include all of it, but a portion of it is reproduced below. Note that the +ordering and wording of the lines may be slightly different, but the effect is +the same. 
-```text +``` {.text} $ packer build \ -var 'aws_access_key=YOUR ACCESS KEY' \ -var 'aws_secret_key=YOUR SECRET KEY' \ @@ -162,10 +164,10 @@ us-east-1: ami-376d1d5e --> digitalocean: A snapshot was created: packer-1371870364 ``` -As you can see, Packer builds both the Amazon and DigitalOcean images -in parallel. It outputs information about each in different colors -(although you can't see that in the block above) so that it is easy to identify. +As you can see, Packer builds both the Amazon and DigitalOcean images in +parallel. It outputs information about each in different colors (although you +can't see that in the block above) so that it is easy to identify. -At the end of the build, Packer outputs both of the artifacts created -(an AMI and a DigitalOcean snapshot). Both images created are bare bones -Ubuntu installations with Redis pre-installed. +At the end of the build, Packer outputs both of the artifacts created (an AMI +and a DigitalOcean snapshot). Both images created are bare bones Ubuntu +installations with Redis pre-installed. diff --git a/website/source/intro/getting-started/provision.html.markdown b/website/source/intro/getting-started/provision.html.markdown index bedb63b69..eda1f0346 100644 --- a/website/source/intro/getting-started/provision.html.markdown +++ b/website/source/intro/getting-started/provision.html.markdown @@ -1,43 +1,45 @@ --- -layout: "intro" -page_title: "Provision" -prev_url: "/intro/getting-started/build-image.html" -next_url: "/intro/getting-started/parallel-builds.html" -next_title: "Parallel Builds" -description: |- - In the previous page of this guide, you created your first image with Packer. The image you just built, however, was basically just a repackaging of a previously existing base AMI. The real utility of Packer comes from being able to install and configure software into the images as well. This stage is also known as the _provision_ step. 
Packer fully supports automated provisioning in order to install software onto the machines prior to turning them into images. ---- +description: | + In the previous page of this guide, you created your first image with Packer. + The image you just built, however, was basically just a repackaging of a + previously existing base AMI. The real utility of Packer comes from being able + to install and configure software into the images as well. This stage is also + known as the *provision* step. Packer fully supports automated provisioning in + order to install software onto the machines prior to turning them into images. +layout: intro +next_title: Parallel Builds +next_url: '/intro/getting-started/parallel-builds.html' +page_title: Provision +prev_url: '/intro/getting-started/build-image.html' +... # Provision -In the previous page of this guide, you created your first image with -Packer. The image you just built, however, was basically just a repackaging -of a previously existing base AMI. The real utility of Packer comes from -being able to install and configure software into the images as well. -This stage is also known as the _provision_ step. Packer fully supports -automated provisioning in order to install software onto the machines prior -to turning them into images. +In the previous page of this guide, you created your first image with Packer. +The image you just built, however, was basically just a repackaging of a +previously existing base AMI. The real utility of Packer comes from being able +to install and configure software into the images as well. This stage is also +known as the *provision* step. Packer fully supports automated provisioning in +order to install software onto the machines prior to turning them into images. -In this section, we're going to complete our image by installing -Redis on it. This way, the image we end up building actually contains -Redis pre-installed. 
Although Redis is a small, simple example, this should -give you an idea of what it may be like to install many more packages into -the image. +In this section, we're going to complete our image by installing Redis on it. +This way, the image we end up building actually contains Redis pre-installed. +Although Redis is a small, simple example, this should give you an idea of what +it may be like to install many more packages into the image. -Historically, pre-baked images have been frowned upon because changing -them has been so tedious and slow. Because Packer is completely automated, -including provisioning, images can be changed quickly and integrated with -modern configuration management tools such as Chef or Puppet. +Historically, pre-baked images have been frowned upon because changing them has +been so tedious and slow. Because Packer is completely automated, including +provisioning, images can be changed quickly and integrated with modern +configuration management tools such as Chef or Puppet. ## Configuring Provisioners Provisioners are configured as part of the template. We'll use the built-in shell provisioner that comes with Packer to install Redis. Modify the -`example.json` template we made previously and add the following. We'll -explain the various parts of the new configuration following the code -block below. +`example.json` template we made previously and add the following. We'll explain +the various parts of the new configuration following the code block below. -```javascript +``` {.javascript} { "variables": ["..."], "builders": ["..."], @@ -53,51 +55,51 @@ block below. } ``` --> **Note:** The `sleep 30` in the example above is -very important. Because Packer is able to detect and SSH into the instance -as soon as SSH is available, Ubuntu actually doesn't get proper amounts -of time to initialize. The sleep makes sure that the OS properly initializes. +-> **Note:** The `sleep 30` in the example above is very important. 
Because +Packer is able to detect and SSH into the instance as soon as SSH is available, +Ubuntu actually doesn't get proper amounts of time to initialize. The sleep +makes sure that the OS properly initializes. -Hopefully it is obvious, but the `builders` section shouldn't actually -contain "...", it should be the contents setup in the previous page -of the getting started guide. Also note the comma after the `"builders": [...]` -section, which was not present in the previous lesson. +Hopefully it is obvious, but the `builders` section shouldn't actually contain +"...", it should be the contents setup in the previous page of the getting +started guide. Also note the comma after the `"builders": [...]` section, which +was not present in the previous lesson. To configure the provisioners, we add a new section `provisioners` to the -template, alongside the `builders` configuration. The provisioners section -is an array of provisioners to run. If multiple provisioners are specified, they -are run in the order given. +template, alongside the `builders` configuration. The provisioners section is an +array of provisioners to run. If multiple provisioners are specified, they are +run in the order given. -By default, each provisioner is run for every builder defined. So if we had -two builders defined in our template, such as both Amazon and DigitalOcean, then -the shell script would run as part of both builds. There are ways to restrict +By default, each provisioner is run for every builder defined. So if we had two +builders defined in our template, such as both Amazon and DigitalOcean, then the +shell script would run as part of both builds. There are ways to restrict provisioners to certain builds, but it is outside the scope of this getting started guide. It is covered in more detail in the complete [documentation](/docs). -The one provisioner we defined has a type of `shell`. This provisioner -ships with Packer and runs shell scripts on the running machine. 
In our -case, we specify two inline commands to run in order to install Redis. +The one provisioner we defined has a type of `shell`. This provisioner ships +with Packer and runs shell scripts on the running machine. In our case, we +specify two inline commands to run in order to install Redis. ## Build With the provisioner configured, give it a pass once again through `packer validate` to verify everything is okay, then build it using -`packer build example.json`. The output should look similar to when you -built your first image, except this time there will be a new step where -the provisioning is run. +`packer build example.json`. The output should look similar to when you built +your first image, except this time there will be a new step where the +provisioning is run. -The output from the provisioner is too verbose to include in this -guide, since it contains all the output from the shell scripts. But you -should see Redis successfully install. After that, Packer once again -turns the machine into an AMI. +The output from the provisioner is too verbose to include in this guide, since +it contains all the output from the shell scripts. But you should see Redis +successfully install. After that, Packer once again turns the machine into an +AMI. If you were to launch this AMI, Redis would be pre-installed. Cool! This is just a basic example. In a real world use case, you may be provisioning -an image with the entire stack necessary to run your application. Or maybe -just the web stack so that you can have an image for web servers pre-built. -This saves tons of time later as you launch these images since everything -is pre-installed. Additionally, since everything is pre-installed, you -can test the images as they're built and know that when they go into -production, they'll be functional. +an image with the entire stack necessary to run your application. Or maybe just +the web stack so that you can have an image for web servers pre-built. 
This +saves tons of time later as you launch these images since everything is +pre-installed. Additionally, since everything is pre-installed, you can test the +images as they're built and know that when they go into production, they'll be +functional. diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index e5d1b48ff..f37a5a5ad 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -1,23 +1,41 @@ --- -layout: "intro" -page_title: "Remote Builds and Storage" -prev_url: "/intro/getting-started/vagrant.html" -next_url: "/intro/getting-started/next.html" -next_title: "Next Steps" -description: |- - Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use Atlas by HashiCorp to both run Packer builds remotely and store the output of builds. ---- +description: | + Up to this point in the guide, you have been running Packer on your local + machine to build and provision images on AWS and DigitalOcean. However, you can + use Atlas by HashiCorp to both run Packer builds remotely and store the output + of builds. +layout: intro +next_title: Next Steps +next_url: '/intro/getting-started/next.html' +page_title: Remote Builds and Storage +prev_url: '/intro/getting-started/vagrant.html' +... # Remote Builds and Storage -Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds remotely and store the output of builds. + +Up to this point in the guide, you have been running Packer on your local +machine to build and provision images on AWS and DigitalOcean. 
However, you can +use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds +remotely and store the output of builds. ## Why Build Remotely? -By building remotely, you can move access credentials off of developer machines, release local machines from long-running Packer processes, and automatically start Packer builds from trigger sources such as `vagrant push`, a version control system, or CI tool. + +By building remotely, you can move access credentials off of developer machines, +release local machines from long-running Packer processes, and automatically +start Packer builds from trigger sources such as `vagrant push`, a version +control system, or CI tool. ## Run Packer Builds Remotely -To run Packer remotely, there are two changes that must be made to the Packer template. The first is the addition of the `push` [configuration](https://www.packer.io/docs/templates/push.html), which sends the Packer template to Atlas so it can run Packer remotely. The second modification is updating the variables section to read variables from the Atlas environment rather than the local environment. Remove the `post-processors` section for now if it is still in your template. -```javascript +To run Packer remotely, there are two changes that must be made to the Packer +template. The first is the addition of the `push` +[configuration](https://www.packer.io/docs/templates/push.html), which sends the +Packer template to Atlas so it can run Packer remotely. The second modification +is updating the variables section to read variables from the Atlas environment +rather than the local environment. Remove the `post-processors` section for now +if it is still in your template. 
+ +``` {.javascript} { "variables": { "aws_access_key": "{{env `aws_access_key`}}", @@ -45,31 +63,35 @@ To run Packer remotely, there are two changes that must be made to the Packer te "name": "ATLAS_USERNAME/packer-tutorial" } } -``` - -To get an Atlas username, [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). Replace "ATLAS_USERNAME" with your username, then run `packer push -create example.json` to send the configuration to Atlas, which automatically starts the build. - -This build will fail since neither `aws_access_key` or `aws_secret_key` are set in the Atlas environment. To set environment variables in Atlas, navigate to the [operations tab](https://atlas.hashicorp.com/operations), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` with their respective values. Now restart the Packer build by either clicking 'rebuild' in the Atlas UI or by running `packer push example.json` again. Now when you click on the active build, you can view the logs in real-time. - --> **Note:** Whenever a change is made to the Packer template, you must `packer push` to update the configuration in Atlas. - -## Store Packer Outputs -Now we have Atlas building an AMI with Redis pre-configured. This is great, but it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). 
The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple: - - ```javascript -{ - "variables": ["..."], - "builders": ["..."], - "provisioners": ["..."], - "push": ["..."], - "post-processors": [ - { - "type": "atlas", - "artifact": "ATLAS_USERNAME/packer-tutorial", - "artifact_type": "amazon.ami" - } - ] -} ``` -Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas. \ No newline at end of file +To get an Atlas username, [create an account +here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). +Replace "ATLAS\_USERNAME" with your username, then run +`packer push -create example.json` to send the configuration to Atlas, which +automatically starts the build. + +This build will fail since neither `aws_access_key` nor `aws_secret_key` are set +in the Atlas environment. To set environment variables in Atlas, navigate to the +[operations tab](https://atlas.hashicorp.com/operations), click the +"packer-tutorial" build configuration that was just created, and then click +'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` +with their respective values. Now restart the Packer build by either clicking +'rebuild' in the Atlas UI or by running `packer push example.json` again. Now +when you click on the active build, you can view the logs in real-time. + +-> **Note:** Whenever a change is made to the Packer template, you must +`packer push` to update the configuration in Atlas. + +## Store Packer Outputs + +Now we have Atlas building an AMI with Redis pre-configured. This is great, but +it's even better to store and version the AMI output so it can be easily +deployed by a tool like [Terraform](https://terraform.io). 
The `atlas` +[post-processor](/docs/post-processors/atlas.html) makes this process simple: + +`javascript { "variables": ["..."], "builders": ["..."], "provisioners": ["..."], "push": ["..."], "post-processors": [ { "type": "atlas", "artifact": "ATLAS_USERNAME/packer-tutorial", "artifact_type": "amazon.ami" } ] }` + +Update the `post-processors` block with your Atlas username, then +`packer push example.json` and watch the build kick off in Atlas! When the build +completes, the resulting artifact will be saved and stored in Atlas. diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index ae14c2748..a24d023e2 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -1,47 +1,51 @@ --- -layout: "intro" -page_title: "Install Packer" -prev_url: "/intro/platforms.html" -next_url: "/intro/getting-started/build-image.html" -next_title: "Build an Image" -description: |- - Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a binary package for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the README and is only recommended for advanced users. ---- +description: | + Packer must first be installed on the machine you want to run it on. To make + installation easy, Packer is distributed as a binary package for all supported + platforms and architectures. This page will not cover how to compile Packer from + source, as that is covered in the README and is only recommended for advanced + users. +layout: intro +next_title: Build an Image +next_url: '/intro/getting-started/build-image.html' +page_title: Install Packer +prev_url: '/intro/platforms.html' +... # Install Packer -Packer must first be installed on the machine you want to run it on. 
-To make installation easy, Packer is distributed as a [binary package](/downloads.html) -for all supported platforms and architectures. This page will not cover how -to compile Packer from source, as that is covered in the +Packer must first be installed on the machine you want to run it on. To make +installation easy, Packer is distributed as a [binary package](/downloads.html) +for all supported platforms and architectures. This page will not cover how to +compile Packer from source, as that is covered in the [README](https://github.com/mitchellh/packer/blob/master/README.md) and is only recommended for advanced users. ## Installing Packer -To install packer, first find the [appropriate package](/downloads.html) -for your system and download it. Packer is packaged as a "zip" file. +To install packer, first find the [appropriate package](/downloads.html) for +your system and download it. Packer is packaged as a "zip" file. Next, unzip the downloaded package into a directory where Packer will be installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good, -depending on whether you want to restrict the install to just your user -or install it system-wide. On Windows systems, you can put it wherever you'd -like. +depending on whether you want to restrict the install to just your user or +install it system-wide. On Windows systems, you can put it wherever you'd like. After unzipping the package, the directory should contain a set of binary -programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step -to installation is to make sure the directory you installed Packer to -is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) -for instructions on setting the PATH on Linux and Mac. -[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) +programs, such as `packer`, `packer-build-amazon-ebs`, etc. 
The final step to +installation is to make sure the directory you installed Packer to is on the +PATH. See [this +page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) +for instructions on setting the PATH on Linux and Mac. [This +page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) contains instructions for setting the PATH on Windows. ## Verifying the Installation -After installing Packer, verify the installation worked by opening -a new command prompt or console, and checking that `packer` is available: +After installing Packer, verify the installation worked by opening a new command +prompt or console, and checking that `packer` is available: -```text +``` {.text} $ packer usage: packer [--version] [--help] [] @@ -54,21 +58,21 @@ Available commands are: version Prints the Packer version ``` -If you get an error that `packer` could not be found, then your PATH -environment variable was not setup properly. Please go back and ensure -that your PATH variable contains the directory which has Packer installed. +If you get an error that `packer` could not be found, then your PATH environment +variable was not set up properly. Please go back and ensure that your PATH +variable contains the directory which has Packer installed. Otherwise, Packer is installed and you're ready to go! ## Alternative Installation Methods -While the binary packages is the only official method of installation, there -are alternatives available. +While the binary package is the only official method of installation, there are +alternatives available. 
### Homebrew If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: -```text +``` {.text} $ brew install packer ``` diff --git a/website/source/intro/getting-started/vagrant.html.markdown b/website/source/intro/getting-started/vagrant.html.markdown index 4d6e20caf..c671095e7 100644 --- a/website/source/intro/getting-started/vagrant.html.markdown +++ b/website/source/intro/getting-started/vagrant.html.markdown @@ -1,33 +1,34 @@ --- -layout: "intro" -page_title: "Vagrant Boxes" -prev_url: "/intro/getting-started/parallel-builds.html" -next_url: "/intro/getting-started/remote-builds.html" -next_title: "Remote Builds and Storage" -description: |- - Packer also has the ability to take the results of a builder (such as an AMI or plain VMware image) and turn it into a Vagrant box. ---- +description: | + Packer also has the ability to take the results of a builder (such as an AMI or + plain VMware image) and turn it into a Vagrant box. +layout: intro +next_title: Remote Builds and Storage +next_url: '/intro/getting-started/remote-builds.html' +page_title: Vagrant Boxes +prev_url: '/intro/getting-started/parallel-builds.html' +... # Vagrant Boxes -Packer also has the ability to take the results of a builder (such as -an AMI or plain VMware image) and turn it into a [Vagrant](http://www.vagrantup.com) -box. +Packer also has the ability to take the results of a builder (such as an AMI or +plain VMware image) and turn it into a [Vagrant](http://www.vagrantup.com) box. This is done using [post-processors](/docs/templates/post-processors.html). These take an artifact created by a previous builder or post-processor and transforms it into a new one. In the case of the Vagrant post-processor, it takes an artifact from a builder and transforms it into a Vagrant box file. -Post-processors are a generally very useful concept. While the example on -this getting-started page will be creating Vagrant images, post-processors -have many interesting use cases. 
For example, you can write a post-processor -to compress artifacts, upload them, test them, etc. +Post-processors are a generally very useful concept. While the example on this +getting-started page will be creating Vagrant images, post-processors have many +interesting use cases. For example, you can write a post-processor to compress +artifacts, upload them, test them, etc. -Let's modify our template to use the Vagrant post-processor to turn our -AWS AMI into a Vagrant box usable with the [vagrant-aws plugin](https://github.com/mitchellh/vagrant-aws). If you followed along in the previous page and setup DigitalOcean, -Packer can't currently make Vagrant boxes for DigitalOcean, but will be able -to soon. +Let's modify our template to use the Vagrant post-processor to turn our AWS AMI +into a Vagrant box usable with the [vagrant-aws +plugin](https://github.com/mitchellh/vagrant-aws). If you followed along in the +previous page and set up DigitalOcean, Packer can't currently make Vagrant boxes +for DigitalOcean, but will be able to soon. ## Enabling the Post-Processor @@ -35,7 +36,7 @@ Post-processors are added in the `post-processors` section of a template, which we haven't created yet. Modify your `example.json` template and add the section. Your template should look like the following: -```javascript +``` {.javascript} { "builders": ["..."], "provisioners": ["..."], @@ -44,8 +45,8 @@ Your template should look like the following: ``` In this case, we're enabling a single post-processor named "vagrant". This -post-processor is built-in to Packer and will create Vagrant boxes. You -can always create [new post-processors](/docs/extend/post-processor.html), however. +post-processor is built-in to Packer and will create Vagrant boxes. You can +always create [new post-processors](/docs/extend/post-processor.html), however. The details on configuring post-processors is covered in the [post-processors](/docs/templates/post-processors.html) documentation. 
@@ -53,27 +54,26 @@ Validate the configuration using `packer validate`. ## Using the Post-Processor -Just run a normal `packer build` and it will now use the post-processor. -Since Packer can't currently make a Vagrant box for DigitalOcean anyways, -I recommend passing the `-only=amazon-ebs` flag to `packer build` so it only -builds the AMI. The command should look like the following: +Just run a normal `packer build` and it will now use the post-processor. Since +Packer can't currently make a Vagrant box for DigitalOcean anyways, I recommend +passing the `-only=amazon-ebs` flag to `packer build` so it only builds the AMI. +The command should look like the following: -```text +``` {.text} $ packer build -only=amazon-ebs example.json ``` -As you watch the output, you'll notice at the end in the artifact listing -that a Vagrant box was made (by default at `packer_aws.box` in the current -directory). Success! +As you watch the output, you'll notice at the end in the artifact listing that a +Vagrant box was made (by default at `packer_aws.box` in the current directory). +Success! But where did the AMI go? When using post-processors, Vagrant removes -intermediary artifacts since they're usually not wanted. Only the final -artifact is preserved. This behavior can be changed, of course. Changing -this behavior is covered [in the documentation](/docs/templates/post-processors.html). +intermediary artifacts since they're usually not wanted. Only the final artifact +is preserved. This behavior can be changed, of course. Changing this behavior is +covered [in the documentation](/docs/templates/post-processors.html). -Typically when removing intermediary artifacts, the actual underlying -files or resources of the artifact are also removed. For example, when -building a VMware image, if you turn it into a Vagrant box, the files of -the VMware image will be deleted since they were compressed into the Vagrant -box. 
With creating AWS images, however, the AMI is kept around, since Vagrant -needs it to function. +Typically when removing intermediary artifacts, the actual underlying files or +resources of the artifact are also removed. For example, when building a VMware +image, if you turn it into a Vagrant box, the files of the VMware image will be +deleted since they were compressed into the Vagrant box. With creating AWS +images, however, the AMI is kept around, since Vagrant needs it to function. diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown index 37c26b9ad..034d02a65 100644 --- a/website/source/intro/hashicorp-ecosystem.html.markdown +++ b/website/source/intro/hashicorp-ecosystem.html.markdown @@ -1,32 +1,63 @@ --- -layout: "intro" -page_title: "Packer and the HashiCorp Ecosystem" -prev_url: "/intro/platforms.html" -next_url: "/intro/getting-started/setup.html" -next_title: "Getting Started: Install Packer" -description: |- - Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools ---- +description: Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools +layout: intro +next_title: 'Getting Started: Install Packer' +next_url: '/intro/getting-started/setup.html' +page_title: Packer and the HashiCorp Ecosystem +prev_url: '/intro/platforms.html' +... # Packer and the HashiCorp Ecosystem -HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem). 
+HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, +Serf, and Consul, and the commercial product Atlas. Packer is just one piece of +the ecosystem HashiCorp has built to make application delivery a versioned, +auditable, repeatable, and collaborative process. To learn more about our +beliefs on the qualities of the modern datacenter and responsible application +delivery, read [The Atlas Mindset: Version Control for +Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem). -If you are using Packer to build machine images and deployable artifacts, it's likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure. +If you are using Packer to build machine images and deployable artifacts, it's +likely that you need a solution for deploying those artifacts. Terraform is our +tool for creating, combining, and modifying infrastructure. -Below are summaries of HashiCorp's open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow. +Below are summaries of HashiCorp's open source projects and a graphic showing +how Atlas connects them to create a full application delivery workflow. # HashiCorp Ecosystem + ![Atlas Workflow](docs/atlas-workflow.png) -[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul to make application delivery a versioned, auditable, repeatable, and collaborative process. +[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul +to make application delivery a versioned, auditable, repeatable, and +collaborative process. 
-[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating machine images and deployable artifacts such as AMIs, OpenStack images, Docker containers, etc. +[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for creating machine images and deployable artifacts such as +AMIs, OpenStack images, Docker containers, etc. -[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating, combining, and modifying infrastructure. In the Atlas workflow Terraform reads from the artifact registry and provisions infrastructure. +[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for creating, combining, and modifying infrastructure. In +the Atlas workflow Terraform reads from the artifact registry and provisions +infrastructure. -[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime. +[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for service discovery, service registry, and health checks. +In the Atlas workflow Consul is configured at the Packer build stage and +identifies the service(s) contained in each artifact. Since Consul is configured +at the build phase with Packer, when the artifact is deployed with Terraform, it +is fully configured with dependencies and service discovery pre-baked. 
This +greatly reduces the risk of an unhealthy node in production due to configuration +failure at runtime. -[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf's gossip protocol as the foundation for service discovery. +[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is +a HashiCorp tool for cluster membership and failure detection. Consul uses +Serf's gossip protocol as the foundation for service discovery. -[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production. +[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for managing development environments that mirror +production. Vagrant environments reduce the friction of developing a project and +reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes +can be built in parallel with production artifacts with Packer to maintain +parity between development and production. diff --git a/website/source/intro/index.html.markdown b/website/source/intro/index.html.markdown index 147cc51ee..c9abcebe4 100644 --- a/website/source/intro/index.html.markdown +++ b/website/source/intro/index.html.markdown @@ -1,31 +1,34 @@ --- -layout: "intro" -page_title: "Introduction" -prev_url: "#" -next_url: "/intro/why.html" -next_title: "Why Use Packer?" -description: |- - Welcome to the world of Packer! This introduction guide will show you what Packer is, explain why it exists, the benefits it has to offer, and how you can get started with it. 
If you're already familiar with Packer, the documentation provides more of a reference for all available features. ---- +description: | + Welcome to the world of Packer! This introduction guide will show you what + Packer is, explain why it exists, the benefits it has to offer, and how you can + get started with it. If you're already familiar with Packer, the documentation + provides more of a reference for all available features. +layout: intro +next_title: 'Why Use Packer?' +next_url: '/intro/why.html' +page_title: Introduction +prev_url: '# ' +... # Introduction to Packer Welcome to the world of Packer! This introduction guide will show you what -Packer is, explain why it exists, the benefits it has to offer, and how -you can get started with it. If you're already familiar with Packer, the +Packer is, explain why it exists, the benefits it has to offer, and how you can +get started with it. If you're already familiar with Packer, the [documentation](/docs) provides more of a reference for all available features. ## What is Packer? -Packer is an open source tool for creating identical machine images for multiple platforms -from a single source configuration. Packer is lightweight, runs on every major -operating system, and is highly performant, creating machine images for -multiple platforms in parallel. Packer does not replace configuration management -like Chef or Puppet. In fact, when building images, Packer is able to use tools -like Chef or Puppet to install software onto the image. +Packer is an open source tool for creating identical machine images for multiple +platforms from a single source configuration. Packer is lightweight, runs on +every major operating system, and is highly performant, creating machine images +for multiple platforms in parallel. Packer does not replace configuration +management like Chef or Puppet. In fact, when building images, Packer is able to +use tools like Chef or Puppet to install software onto the image. 
-A _machine image_ is a single static unit that contains a pre-configured operating -system and installed software which is used to quickly create new running machines. -Machine image formats change for each platform. Some examples include -[AMIs](http://en.wikipedia.org/wiki/Amazon_Machine_Image) for EC2, +A *machine image* is a single static unit that contains a pre-configured +operating system and installed software which is used to quickly create new +running machines. Machine image formats change for each platform. Some examples +include [AMIs](http://en.wikipedia.org/wiki/Amazon_Machine_Image) for EC2, VMDK/VMX files for VMware, OVF exports for VirtualBox, etc. diff --git a/website/source/intro/platforms.html.markdown b/website/source/intro/platforms.html.markdown index d97756fd7..586c0c4ec 100644 --- a/website/source/intro/platforms.html.markdown +++ b/website/source/intro/platforms.html.markdown @@ -1,65 +1,73 @@ --- -layout: "intro" -page_title: "Supported Platforms" -prev_url: "/intro/use-cases.html" -next_url: "/intro/hashicorp-ecosystem.html" -next_title: "Packer & the HashiCorp Ecosystem" -description: |- - Packer can create machine images for any platform. Packer ships with support for a set of platforms, but can be extended through plugins to support any platform. This page documents the list of supported image types that Packer supports creating. ---- +description: | + Packer can create machine images for any platform. Packer ships with support for + a set of platforms, but can be extended through plugins to support any platform. + This page documents the list of supported image types that Packer supports + creating. +layout: intro +next_title: 'Packer & the HashiCorp Ecosystem' +next_url: '/intro/hashicorp-ecosystem.html' +page_title: Supported Platforms +prev_url: '/intro/use-cases.html' +... # Supported Platforms -Packer can create machine images for any platform. 
Packer ships with -support for a set of platforms, but can be [extended through plugins](/docs/extend/builder.html) -to support any platform. This page documents the list of supported image -types that Packer supports creating. +Packer can create machine images for any platform. Packer ships with support for +a set of platforms, but can be [extended through +plugins](/docs/extend/builder.html) to support any platform. This page documents +the list of supported image types that Packer supports creating. -If you were looking to see what platforms Packer is able to run on, see -the page on [installing Packer](/intro/getting-started/setup.html). +If you were looking to see what platforms Packer is able to run on, see the page +on [installing Packer](/intro/getting-started/setup.html). --> **Note:** We're always looking to officially support more -target platforms. If you're interested in adding support for another -platform, please help by opening an issue or pull request within -[GitHub](https://github.com/mitchellh/packer) so we can discuss -how to make it happen. +-> **Note:** We're always looking to officially support more target +platforms. If you're interested in adding support for another platform, please +help by opening an issue or pull request within +[GitHub](https://github.com/mitchellh/packer) so we can discuss how to make it +happen. -Packer supports creating images for the following platforms or targets. -The format of the resulting image and any high-level information about the -platform is noted. They are listed in alphabetical order. For more detailed -information on supported configuration parameters and usage, please see -the appropriate [documentation page within the documentation section](/docs). +Packer supports creating images for the following platforms or targets. The +format of the resulting image and any high-level information about the platform +is noted. They are listed in alphabetical order. 
For more detailed information +on supported configuration parameters and usage, please see the appropriate +[documentation page within the documentation section](/docs). -* ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within +- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within [EC2](http://aws.amazon.com/ec2/), optionally distributed to multiple regions. -* ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) +- ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) that can be used to start a pre-configured DigitalOcean instance of any size. -* ***Docker***. Snapshots for [Docker](http://www.docker.io/) - that can be used to start a pre-configured Docker instance. +- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used + to start a pre-configured Docker instance. -* ***Google Compute Engine***. Snapshots for [Google Compute Engine](https://cloud.google.com/products/compute-engine) - that can be used to start a pre-configured Google Compute Engine instance. +- ***Google Compute Engine***. Snapshots for [Google Compute + Engine](https://cloud.google.com/products/compute-engine) that can be used to + start a pre-configured Google Compute Engine instance. -* ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) - that can be used to start pre-configured OpenStack servers. +- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can be + used to start pre-configured OpenStack servers. -* ***Parallels (PVM)***. Exported virtual machines for [Parallels](http://www.parallels.com/downloads/desktop/), - including virtual machine metadata such as RAM, CPUs, etc. These virtual - machines are portable and can be started on any platform Parallels runs on. +- ***Parallels (PVM)***. Exported virtual machines for + [Parallels](http://www.parallels.com/downloads/desktop/), including virtual + machine metadata such as RAM, CPUs, etc. 
These virtual machines are portable + and can be started on any platform Parallels runs on. -* ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or [Xen](http://www.xenproject.org/) - that can be used to start pre-configured KVM or Xen instances. +- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or + [Xen](http://www.xenproject.org/) that can be used to start pre-configured KVM + or Xen instances. -* ***VirtualBox (OVF)***. Exported virtual machines for [VirtualBox](https://www.virtualbox.org/), - including virtual machine metadata such as RAM, CPUs, etc. These virtual - machines are portable and can be started on any platform VirtualBox runs on. +- ***VirtualBox (OVF)***. Exported virtual machines for + [VirtualBox](https://www.virtualbox.org/), including virtual machine metadata + such as RAM, CPUs, etc. These virtual machines are portable and can be started + on any platform VirtualBox runs on. -* ***VMware (VMX)***. Exported virtual machines for [VMware](http://www.vmware.com/) - that can be run within any desktop products such as Fusion, Player, or - Workstation, as well as server products such as vSphere. +- ***VMware (VMX)***. Exported virtual machines for + [VMware](http://www.vmware.com/) that can be run within any desktop products + such as Fusion, Player, or Workstation, as well as server products such + as vSphere. -As previously mentioned, these are just the target image types that Packer -ships with out of the box. You can always [extend Packer through plugins](/docs/extend/builder.html) -to support more. +As previously mentioned, these are just the target image types that Packer ships +with out of the box. You can always [extend Packer through +plugins](/docs/extend/builder.html) to support more. 
diff --git a/website/source/intro/use-cases.html.markdown b/website/source/intro/use-cases.html.markdown index 0b73ea32c..2cd38d967 100644 --- a/website/source/intro/use-cases.html.markdown +++ b/website/source/intro/use-cases.html.markdown @@ -1,20 +1,24 @@ --- -layout: "intro" -page_title: "Use Cases" -prev_url: "/intro/why.html" -next_url: "/intro/platforms.html" -next_title: "Supported Platforms" -description: |- - By now you should know what Packer does and what the benefits of image creation are. In this section, we'll enumerate _some_ of the use cases for Packer. Note that this is not an exhaustive list by any means. There are definitely use cases for Packer not listed here. This list is just meant to give you an idea of how Packer may improve your processes. ---- +description: | + By now you should know what Packer does and what the benefits of image creation + are. In this section, we'll enumerate *some* of the use cases for Packer. Note + that this is not an exhaustive list by any means. There are definitely use cases + for Packer not listed here. This list is just meant to give you an idea of how + Packer may improve your processes. +layout: intro +next_title: Supported Platforms +next_url: '/intro/platforms.html' +page_title: Use Cases +prev_url: '/intro/why.html' +... # Use Cases -By now you should know what Packer does and what the benefits of image -creation are. In this section, we'll enumerate _some_ of the use cases -for Packer. Note that this is not an exhaustive list by any means. There are -definitely use cases for Packer not listed here. This list is just meant -to give you an idea of how Packer may improve your processes. +By now you should know what Packer does and what the benefits of image creation +are. In this section, we'll enumerate *some* of the use cases for Packer. Note +that this is not an exhaustive list by any means. There are definitely use cases +for Packer not listed here. 
This list is just meant to give you an idea of how +Packer may improve your processes. ### Continuous Delivery @@ -24,30 +28,31 @@ can be used to generate new machine images for multiple platforms on every change to Chef/Puppet. As part of this pipeline, the newly created images can then be launched and -tested, verifying the infrastructure changes work. If the tests pass, you can -be confident that that image will work when deployed. This brings a new level -of stability and testability to infrastructure changes. +tested, verifying the infrastructure changes work. If the tests pass, you can be +confident that that image will work when deployed. This brings a new level of +stability and testability to infrastructure changes. ### Dev/Prod Parity -Packer helps [keep development, staging, and production as similar as possible](http://www.12factor.net/dev-prod-parity). -Packer can be used to generate images for multiple platforms at the same time. -So if you use AWS for production and VMware (perhaps with [Vagrant](http://www.vagrantup.com)) -for development, you can generate both an AMI and a VMware machine using -Packer at the same time from the same template. +Packer helps [keep development, staging, and production as similar as +possible](http://www.12factor.net/dev-prod-parity). Packer can be used to +generate images for multiple platforms at the same time. So if you use AWS for +production and VMware (perhaps with [Vagrant](http://www.vagrantup.com)) for +development, you can generate both an AMI and a VMware machine using Packer at +the same time from the same template. Mix this in with the continuous delivery use case above, and you have a pretty -slick system for consistent work environments from development all the -way through to production. +slick system for consistent work environments from development all the way +through to production. 
### Appliance/Demo Creation -Since Packer creates consistent images for multiple platforms in parallel, -it is perfect for creating [appliances](http://en.wikipedia.org/wiki/Software_appliance) -and disposable product demos. As your software changes, you can automatically -create appliances with the software pre-installed. Potential users can then -get started with your software by deploying it to the environment of their -choice. +Since Packer creates consistent images for multiple platforms in parallel, it is +perfect for creating +[appliances](http://en.wikipedia.org/wiki/Software_appliance) and disposable +product demos. As your software changes, you can automatically create appliances +with the software pre-installed. Potential users can then get started with your +software by deploying it to the environment of their choice. -Packaging up software with complex requirements has never been so easy. -Or enjoyable, if you ask me. +Packaging up software with complex requirements has never been so easy. Or +enjoyable, if you ask me. diff --git a/website/source/intro/why.html.markdown b/website/source/intro/why.html.markdown index 98de7855f..ee6b5ad9e 100644 --- a/website/source/intro/why.html.markdown +++ b/website/source/intro/why.html.markdown @@ -1,24 +1,29 @@ --- -layout: "intro" -page_title: "Why Use Packer?" -prev_url: "/intro/index.html" -next_url: "/intro/use-cases.html" -next_title: "Packer Use Cases" -description: |- - Pre-baked machine images have a lot of advantages, but most have been unable to benefit from them because images have been too tedious to create and manage. There were either no existing tools to automate the creation of machine images or they had too high of a learning curve. The result is that, prior to Packer, creating machine images threatened the agility of operations teams, and therefore aren't used, despite the massive benefits. 
---- +description: | + Pre-baked machine images have a lot of advantages, but most have been unable to + benefit from them because images have been too tedious to create and manage. + There were either no existing tools to automate the creation of machine images + or they had too high of a learning curve. The result is that, prior to Packer, + creating machine images threatened the agility of operations teams, and + therefore aren't used, despite the massive benefits. +layout: intro +next_title: Packer Use Cases +next_url: '/intro/use-cases.html' +page_title: 'Why Use Packer?' +prev_url: '/intro/index.html' +... # Why Use Packer? -Pre-baked machine images have a lot of advantages, but most have been unable -to benefit from them because images have been too tedious to create and manage. -There were either no existing tools to automate the creation of machine images or -they had too high of a learning curve. The result is that, prior to Packer, -creating machine images threatened the agility of operations teams, and therefore -aren't used, despite the massive benefits. +Pre-baked machine images have a lot of advantages, but most have been unable to +benefit from them because images have been too tedious to create and manage. +There were either no existing tools to automate the creation of machine images +or they had too high of a learning curve. The result is that, prior to Packer, +creating machine images threatened the agility of operations teams, and +therefore aren't used, despite the massive benefits. -Packer changes all of this. Packer is easy to use and automates the creation -of any type of machine image. It embraces modern configuration management by +Packer changes all of this. Packer is easy to use and automates the creation of +any type of machine image. It embraces modern configuration management by encouraging you to use a framework such as Chef or Puppet to install and configure the software within your Packer-made images. 
@@ -28,25 +33,26 @@ untapped potential and opening new opportunities. ## Advantages of Using Packer ***Super fast infrastructure deployment***. Packer images allow you to launch -completely provisioned and configured machines in seconds, rather than -several minutes or hours. This benefits not only production, but development as well, -since development virtual machines can also be launched in seconds, without waiting -for a typically much longer provisioning time. +completely provisioned and configured machines in seconds, rather than several +minutes or hours. This benefits not only production, but development as well, +since development virtual machines can also be launched in seconds, without +waiting for a typically much longer provisioning time. ***Multi-provider portability***. Because Packer creates identical images for -multiple platforms, you can run production in AWS, staging/QA in a private -cloud like OpenStack, and development in desktop virtualization solutions -such as VMware or VirtualBox. Each environment is running an identical -machine image, giving ultimate portability. +multiple platforms, you can run production in AWS, staging/QA in a private cloud +like OpenStack, and development in desktop virtualization solutions such as +VMware or VirtualBox. Each environment is running an identical machine image, +giving ultimate portability. -***Improved stability***. Packer installs and configures all the software for -a machine at the time the image is built. If there are bugs in these scripts, -they'll be caught early, rather than several minutes after a machine is launched. +***Improved stability***. Packer installs and configures all the software for a +machine at the time the image is built. If there are bugs in these scripts, +they'll be caught early, rather than several minutes after a machine is +launched. ***Greater testability***. 
After a machine image is built, that machine image can be quickly launched and smoke tested to verify that things appear to be -working. If they are, you can be confident that any other machines launched -from that image will function properly. +working. If they are, you can be confident that any other machines launched from +that image will function properly. Packer makes it extremely easy to take advantage of all these benefits. From c42e7cfe414b6c18ed2395c1f962bc37a2382880 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:41:42 -0700 Subject: [PATCH 020/100] Added note about installing pandoc if we can't find it --- website/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/Makefile b/website/Makefile index 1cc81038c..604e9c628 100644 --- a/website/Makefile +++ b/website/Makefile @@ -12,6 +12,6 @@ build: init format: bundle exec htmlbeautifier -t 2 source/*.erb bundle exec htmlbeautifier -t 2 source/layouts/*.erb + @pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content" pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true pandoc -v > /dev/null && find . 
-iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true - From 555a8ba792d1ae7cc1a86d3d9971a684f221b2e1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 20:22:50 -0700 Subject: [PATCH 021/100] Change two blanks to one blank after numbered list item --- .../docs/extend/developing-plugins.html.markdown | 4 ++-- website/source/docs/extend/plugins.html.markdown | 6 +++--- .../docs/post-processors/atlas.html.markdown | 6 +++--- .../post-processors/vagrant-cloud.html.markdown | 16 ++++++++-------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown index 2ccdd437f..0d86df3d2 100644 --- a/website/source/docs/extend/developing-plugins.html.markdown +++ b/website/source/docs/extend/developing-plugins.html.markdown @@ -60,10 +60,10 @@ dependencies. There are two steps involved in creating a plugin: -1. Implement the desired interface. For example, if you're building a builder +1. Implement the desired interface. For example, if you're building a builder plugin, implement the `packer.Builder` interface. -2. Serve the interface by calling the appropriate plugin serving method in your +2. Serve the interface by calling the appropriate plugin serving method in your main method. In the case of a builder, this is `plugin.ServeBuilder`. A basic example is shown below. In this example, assume the `Builder` struct diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown index f8b800a30..98249de5d 100644 --- a/website/source/docs/extend/plugins.html.markdown +++ b/website/source/docs/extend/plugins.html.markdown @@ -51,12 +51,12 @@ Once the plugin is named properly, Packer automatically discovers plugins in the following directories in the given order. If a conflicting plugin is found later, it will take precedence over one found earlier. -1. 
The directory where `packer` is, or the executable directory. +1. The directory where `packer` is, or the executable directory. -2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` +2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on Windows. -3. The current working directory. +3. The current working directory. The valid types for plugins are: diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index c038a119a..18211c313 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -25,13 +25,13 @@ location in Atlas. Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI +1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) -2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. +2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the version if the artifact already exists -3. The new version is ready and available to be used in deployments with a tool +3. The new version is ready and available to be used in deployments with a tool like [Terraform](https://terraform.io) ## Configuration diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index e049552da..4891797e8 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -36,16 +36,16 @@ and deliver them to your team in some fashion. Here is an example workflow: -1. You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box +1. 
You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration -3. The post-processor receives the box from the `vagrant` post-processor -4. It then creates the configured version, or verifies the existence of it, on +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on Vagrant Cloud -5. A provider matching the name of the Vagrant provider is then created -6. The box is uploaded to Vagrant Cloud -7. The upload is verified -8. The version is released and available to users of the box +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. The version is released and available to users of the box ## Configuration From d8e8f98b322d6fde3d10534ddafbeed648c80066 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 20:25:58 -0700 Subject: [PATCH 022/100] Change to 4 spaces --- website/Makefile | 2 +- website/source/community/index.html.markdown | 36 +- .../docs/basics/terminology.html.markdown | 64 +-- .../docs/builders/amazon-chroot.html.markdown | 162 ++++---- .../docs/builders/amazon-ebs.html.markdown | 220 +++++----- .../builders/amazon-instance.html.markdown | 271 ++++++------- .../source/docs/builders/amazon.html.markdown | 25 +- .../docs/builders/digitalocean.html.markdown | 52 +-- .../source/docs/builders/docker.html.markdown | 62 +-- .../docs/builders/openstack.html.markdown | 94 ++--- .../docs/builders/parallels-iso.html.markdown | 269 +++++++------ .../docs/builders/parallels-pvm.html.markdown | 177 ++++---- .../docs/builders/parallels.html.markdown | 18 +- .../source/docs/builders/qemu.html.markdown | 267 +++++++------ .../builders/virtualbox-iso.html.markdown | 318 +++++++-------- 
.../builders/virtualbox-ovf.html.markdown | 248 ++++++------ .../docs/builders/virtualbox.html.markdown | 19 +- .../docs/builders/vmware-iso.html.markdown | 378 +++++++++--------- .../docs/builders/vmware-vmx.html.markdown | 157 ++++---- .../source/docs/builders/vmware.html.markdown | 20 +- .../docs/command-line/build.html.markdown | 36 +- .../docs/command-line/fix.html.markdown | 2 +- .../machine-readable.html.markdown | 24 +- .../docs/command-line/push.html.markdown | 16 +- .../docs/command-line/validate.html.markdown | 4 +- .../extend/developing-plugins.html.markdown | 16 +- .../source/docs/extend/plugins.html.markdown | 20 +- .../docs/extend/post-processor.html.markdown | 16 +- .../command-build.html.markdown | 88 ++-- .../command-inspect.html.markdown | 24 +- .../command-version.html.markdown | 24 +- .../machine-readable/general.html.markdown | 8 +- .../docs/machine-readable/index.html.markdown | 8 +- .../other/core-configuration.html.markdown | 18 +- .../environmental-variables.html.markdown | 38 +- .../docs/post-processors/atlas.html.markdown | 51 +-- .../post-processors/compress.html.markdown | 22 +- .../docker-import.html.markdown | 4 +- .../post-processors/docker-push.html.markdown | 12 +- .../post-processors/docker-save.html.markdown | 2 +- .../post-processors/docker-tag.html.markdown | 8 +- .../vagrant-cloud.html.markdown | 62 +-- .../post-processors/vagrant.html.markdown | 50 +-- .../post-processors/vsphere.html.markdown | 39 +- .../provisioners/ansible-local.html.markdown | 107 +++-- .../provisioners/chef-client.html.markdown | 113 +++--- .../docs/provisioners/chef-solo.html.markdown | 127 +++--- .../docs/provisioners/file.html.markdown | 22 +- .../provisioners/powershell.html.markdown | 77 ++-- .../puppet-masterless.html.markdown | 113 +++--- .../provisioners/puppet-server.html.markdown | 50 +-- .../salt-masterless.html.markdown | 40 +- .../docs/provisioners/shell.html.markdown | 132 +++--- .../configuration-templates.html.markdown | 46 ++- 
.../docs/templates/introduction.html.markdown | 59 +-- .../source/docs/templates/push.html.markdown | 34 +- website/source/intro/platforms.html.markdown | 54 +-- 57 files changed, 2252 insertions(+), 2173 deletions(-) diff --git a/website/Makefile b/website/Makefile index 604e9c628..af5f71039 100644 --- a/website/Makefile +++ b/website/Makefile @@ -13,5 +13,5 @@ format: bundle exec htmlbeautifier -t 2 source/*.erb bundle exec htmlbeautifier -t 2 source/layouts/*.erb @pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content" - pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true + pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=4 --atx-headers -s --columns=80 {} > {}.new"\; || true pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index f4069fbdf..3951e909f 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -29,7 +29,8 @@ list as contributors come and go.
    -
    +
    +

    Mitchell Hashimoto (@mitchellh)

    @@ -41,9 +42,11 @@ list as contributors come and go. described as "automation obsessed."

    -
    -
    +
    + +
    +

    Jack Pearkes (@pearkes)

    @@ -52,9 +55,11 @@ list as contributors come and go. for Packer. Outside of Packer, Jack is an avid open source contributor and software consultant.

    -
    -
    +
    + +
    +

    Mark Peek (@markpeek)

    @@ -65,9 +70,11 @@ list as contributors come and go. IronPort Python libraries. Mark is also a FreeBSD committer.

    -
    -
    +
    + +
    +

    Ross Smith II (@rasa)

    @@ -78,9 +85,11 @@ VMware builder on Windows, and provides other valuable assistance. Ross is an open source enthusiast, published author, and freelance consultant.

    -
    -
    +
    + +
    +

    Rickard von Essen
    (@rickard-von-essen)

    @@ -90,8 +99,11 @@ Rickard von Essen maintains our Parallels Desktop builder. Rickard is a polyglot programmer and consults on Continuous Delivery.

    -
    - -
    + +
    + +
    + +
    diff --git a/website/source/docs/basics/terminology.html.markdown b/website/source/docs/basics/terminology.html.markdown index 800478143..b20220b5c 100644 --- a/website/source/docs/basics/terminology.html.markdown +++ b/website/source/docs/basics/terminology.html.markdown @@ -17,41 +17,41 @@ Luckily, there are relatively few. This page documents all the terminology required to understand and use Packer. The terminology is in alphabetical order for easy referencing. -- `Artifacts` are the results of a single build, and are usually a set of IDs or - files to represent a machine image. Every builder produces a single artifact. - As an example, in the case of the Amazon EC2 builder, the artifact is a set of - AMI IDs (one per region). For the VMware builder, the artifact is a directory - of files comprising the created virtual machine. +- `Artifacts` are the results of a single build, and are usually a set of IDs + or files to represent a machine image. Every builder produces a + single artifact. As an example, in the case of the Amazon EC2 builder, the + artifact is a set of AMI IDs (one per region). For the VMware builder, the + artifact is a directory of files comprising the created virtual machine. -- `Builds` are a single task that eventually produces an image for a - single platform. Multiple builds run in parallel. Example usage in a sentence: - "The Packer build produced an AMI to run our web application." Or: "Packer is - running the builds now for VMware, AWS, and VirtualBox." +- `Builds` are a single task that eventually produces an image for a + single platform. Multiple builds run in parallel. Example usage in a + sentence: "The Packer build produced an AMI to run our web application." Or: + "Packer is running the builds now for VMware, AWS, and VirtualBox." -- `Builders` are components of Packer that are able to create a machine image - for a single platform. Builders read in some configuration and use that to run - and generate a machine image. 
A builder is invoked as part of a build in order - to create the actual resulting images. Example builders include VirtualBox, - VMware, and Amazon EC2. Builders can be created and added to Packer in the - form of plugins. +- `Builders` are components of Packer that are able to create a machine image + for a single platform. Builders read in some configuration and use that to + run and generate a machine image. A builder is invoked as part of a build in + order to create the actual resulting images. Example builders include + VirtualBox, VMware, and Amazon EC2. Builders can be created and added to + Packer in the form of plugins. -- `Commands` are sub-commands for the `packer` program that perform some job. An - example command is "build", which is invoked as `packer build`. Packer ships - with a set of commands out of the box in order to define its - command-line interface. Commands can also be created and added to Packer in - the form of plugins. +- `Commands` are sub-commands for the `packer` program that perform some job. + An example command is "build", which is invoked as `packer build`. Packer + ships with a set of commands out of the box in order to define its + command-line interface. Commands can also be created and added to Packer in + the form of plugins. -- `Post-processors` are components of Packer that take the result of a builder - or another post-processor and process that to create a new artifact. Examples - of post-processors are compress to compress artifacts, upload to upload - artifacts, etc. +- `Post-processors` are components of Packer that take the result of a builder + or another post-processor and process that to create a new artifact. + Examples of post-processors are compress to compress artifacts, upload to + upload artifacts, etc. -- `Provisioners` are components of Packer that install and configure software - within a running machine prior to that machine being turned into a - static image. 
They perform the major work of making the image contain - useful software. Example provisioners include shell scripts, Chef, - Puppet, etc. +- `Provisioners` are components of Packer that install and configure software + within a running machine prior to that machine being turned into a + static image. They perform the major work of making the image contain + useful software. Example provisioners include shell scripts, Chef, + Puppet, etc. -- `Templates` are JSON files which define one or more builds by configuring the - various components of Packer. Packer is able to read a template and use that - information to create multiple machine images in parallel. +- `Templates` are JSON files which define one or more builds by configuring + the various components of Packer. Packer is able to read a template and use + that information to create multiple machine images in parallel. diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index c3e16a982..2826e67ab 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -60,98 +60,100 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables - aren't set and Packer is running on an EC2 instance, Packer will check the - instance metadata for IAM role keys. +- `access_key` (string) - The access key used to communicate with AWS. 
If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `secret_key` (string) - The secret key used to communicate with AWS. If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. If the environmental variables - aren't set and Packer is running on an EC2 instance, Packer will check the - instance metadata for IAM role keys. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. 
If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -- `source_ami` (string) - The source AMI whose root volume will be copied and - provisioned on the currently running instance. This must be an EBS-backed AMI - with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed + AMI with a root volume snapshot that you have access to. ### Optional: -- `ami_description` (string) - The description to set for the resulting AMI(s). - By default this description is empty. +- `ami_description` (string) - The description to set for the + resulting AMI(s). By default this description is empty. -- `ami_groups` (array of strings) - A list of groups that have access to launch - the resulting AMI(s). By default no groups have permission to launch the AMI. - `all` will make the AMI publicly accessible. +- `ami_groups` (array of strings) - A list of groups that have access to + launch the resulting AMI(s). By default no groups have permission to launch + the AMI. `all` will make the AMI publicly accessible. -- `ami_product_codes` (array of strings) - A list of product codes to associate - with the AMI. By default no product codes are associated with the AMI. +- `ami_product_codes` (array of strings) - A list of product codes to + associate with the AMI. By default no product codes are associated with + the AMI. -- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags - and attributes are copied along with the AMI. AMI copying takes time depending - on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. + Tags and attributes are copied along with the AMI. 
AMI copying takes time + depending on the size of the AMI, but will generally take many minutes. -- `ami_users` (array of strings) - A list of account IDs that have access to - launch the resulting AMI(s). By default no additional users other than the - user creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -- `ami_virtualization_type` (string) - The type of virtualization for the AMI - you are building. This option is required to register HVM images. Can be - "paravirtual" (default) or "hvm". +- `ami_virtualization_type` (string) - The type of virtualization for the AMI + you are building. This option is required to register HVM images. Can be + "paravirtual" (default) or "hvm". -- `chroot_mounts` (array of array of strings) - This is a list of additional - devices to mount into the chroot environment. This configuration parameter - requires some additional documentation which is in the "Chroot Mounts" - section below. Please read that section for more information on how to - use this. +- `chroot_mounts` (array of array of strings) - This is a list of additional + devices to mount into the chroot environment. This configuration parameter + requires some additional documentation which is in the "Chroot Mounts" + section below. Please read that section for more information on how to + use this. -- `command_wrapper` (string) - How to run shell commands. This defaults - to "{{.Command}}". This may be useful to set if you want to set environmental - variables or perhaps run it with `sudo` or so on. This is a configuration - template where the `.Command` variable is replaced with the command to be run. +- `command_wrapper` (string) - How to run shell commands. This defaults + to "{{.Command}}". 
This may be useful to set if you want to set + environmental variables or perhaps run it with `sudo` or so on. This is a + configuration template where the `.Command` variable is replaced with the + command to be run. -- `copy_files` (array of strings) - Paths to files on the running EC2 instance - that will be copied into the chroot environment prior to provisioning. This is - useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. +- `copy_files` (array of strings) - Paths to files on the running EC2 instance + that will be copied into the chroot environment prior to provisioning. This + is useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. -- `device_path` (string) - The path to the device where the root volume of the - source AMI will be attached. This defaults to "" (empty string), which forces - Packer to find an open device automatically. +- `device_path` (string) - The path to the device where the root volume of the + source AMI will be attached. This defaults to "" (empty string), which + forces Packer to find an open device automatically. -- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) - on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS - IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced + networking (SriovNetSupport) on HVM-compatible AMIs. If true, add + `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -- `force_deregister` (boolean) - Force Packer to first deregister an existing - AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -- `mount_path` (string) - The path where the volume will be mounted. This is - where the chroot environment will be. This defaults to - `packer-amazon-chroot-volumes/{{.Device}}`. 
This is a configuration template - where the `.Device` variable is replaced with the name of the device where the - volume is attached. +- `mount_path` (string) - The path where the volume will be mounted. This is + where the chroot environment will be. This defaults to + `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template + where the `.Device` variable is replaced with the name of the device where + the volume is attached. -- `mount_options` (array of strings) - Options to supply the `mount` command - when mounting devices. Each option will be prefixed with `-o` and supplied to - the `mount` command ran by Packer. Because this command is ran in a shell, - user discrestion is advised. See [this manual page for the mount - command](http://linuxcommand.org/man_pages/mount8.html) for valid file system - specific options +- `mount_options` (array of strings) - Options to supply the `mount` command + when mounting devices. Each option will be prefixed with `-o` and supplied + to the `mount` command ran by Packer. Because this command is ran in a + shell, user discrestion is advised. See [this manual page for the mount + command](http://linuxcommand.org/man_pages/mount8.html) for valid file + system specific options -- `root_volume_size` (integer) - The size of the root volume for the chroot - environment, and the resulting AMI +- `root_volume_size` (integer) - The size of the root volume for the chroot + environment, and the resulting AMI -- `tags` (object of key/value strings) - Tags applied to the AMI. +- `tags` (object of key/value strings) - Tags applied to the AMI. ## Basic Example @@ -173,11 +175,11 @@ The `chroot_mounts` configuration can be used to mount additional devices within the chroot. 
By default, the following additional mounts are added into the chroot by Packer: -- `/proc` (proc) -- `/sys` (sysfs) -- `/dev` (bind to real `/dev`) -- `/dev/pts` (devpts) -- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) +- `/proc` (proc) +- `/sys` (sysfs) +- `/dev` (bind to real `/dev`) +- `/dev/pts` (devpts) +- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) These default mounts are usually good enough for anyone and are sane defaults. However, if you want to change or add the mount points, you may using the @@ -195,12 +197,12 @@ However, if you want to change or add the mount points, you may using the `chroot_mounts` is a list of a 3-tuples of strings. The three components of the 3-tuple, in order, are: -- The filesystem type. If this is "bind", then Packer will properly bind the - filesystem to another mount point. +- The filesystem type. If this is "bind", then Packer will properly bind the + filesystem to another mount point. -- The source device. +- The source device. -- The mount directory. +- The mount directory. ## Parallelism diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index cb6b7c9d5..34b84a06b 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -40,162 +40,164 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. 
If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `instance_type` (string) - The EC2 instance type to use while building the - AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -- `region` (string) - The name of the region, such as "us-east-1", in which to - launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -- `secret_key` (string) - The secret key used to communicate with AWS. If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. 
If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -- `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -- `ssh_username` (string) - The username to use in order to communicate over SSH - to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. ### Optional: -- `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: +- `ami_block_device_mappings` (array of block device mappings) - Add the block + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) - The device name exposed to the instance (for +- `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) - The virtual device name. See the documentation on +- `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) for more information - - `snapshot_id` (string) - The ID of the snapshot - - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) +- `snapshot_id` (string) - The ID of the snapshot +- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) - The size of the volume, in GiB. Required if not +- `volume_size` (integer) - The size of the volume, in GiB. 
Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) - Indicates whether the EBS volume is +- `delete_on_termination` (boolean) - Indicates whether the EBS volume is deleted on instance termination - - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - - `no_device` (boolean) - Suppresses the specified device included in the +- `encrypted` (boolean) - Indicates whether to encrypt the volume or not +- `no_device` (boolean) - Suppresses the specified device included in the block device mapping of the AMI - - `iops` (integer) - The number of I/O operations per second (IOPS) that the +- `iops` (integer) - The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) for more information -- `ami_description` (string) - The description to set for the resulting AMI(s). - By default this description is empty. +- `ami_description` (string) - The description to set for the + resulting AMI(s). By default this description is empty. -- `ami_groups` (array of strings) - A list of groups that have access to launch - the resulting AMI(s). By default no groups have permission to launch the AMI. - `all` will make the AMI publicly accessible. AWS currently doesn't accept any - value other than "all". +- `ami_groups` (array of strings) - A list of groups that have access to + launch the resulting AMI(s). By default no groups have permission to launch + the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't + accept any value other than "all". -- `ami_product_codes` (array of strings) - A list of product codes to associate - with the AMI. By default no product codes are associated with the AMI. +- `ami_product_codes` (array of strings) - A list of product codes to + associate with the AMI. By default no product codes are associated with + the AMI. 
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags - and attributes are copied along with the AMI. AMI copying takes time depending - on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. + Tags and attributes are copied along with the AMI. AMI copying takes time + depending on the size of the AMI, but will generally take many minutes. -- `ami_users` (array of strings) - A list of account IDs that have access to - launch the resulting AMI(s). By default no additional users other than the - user creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -- `associate_public_ip_address` (boolean) - If using a non-default VPC, public - IP addresses are not provided by default. If this is toggled, your new - instance will get a Public IP. +- `associate_public_ip_address` (boolean) - If using a non-default VPC, public + IP addresses are not provided by default. If this is toggled, your new + instance will get a Public IP. -- `availability_zone` (string) - Destination availability zone to launch - instance in. Leave this empty to allow Amazon to auto-assign. +- `availability_zone` (string) - Destination availability zone to launch + instance in. Leave this empty to allow Amazon to auto-assign. -- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) - on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS - IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced + networking (SriovNetSupport) on HVM-compatible AMIs. If true, add + `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -- `force_deregister` (boolean) - Force Packer to first deregister an existing - AMI if one with the same name already exists. 
Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -- `iam_instance_profile` (string) - The name of an [IAM instance - profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) - to launch the EC2 instance with. +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) + to launch the EC2 instance with. -- `launch_block_device_mappings` (array of block device mappings) - Add the - block device mappings to the launch instance. The block device mappings are - the same as `ami_block_device_mappings` above. +- `launch_block_device_mappings` (array of block device mappings) - Add the + block device mappings to the launch instance. The block device mappings are + the same as `ami_block_device_mappings` above. -- `run_tags` (object of key/value strings) - Tags to apply to the instance that - is *launched* to create the AMI. These tags are *not* applied to the resulting - AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance + that is *launched* to create the AMI. These tags are *not* applied to the + resulting AMI unless they're duplicated in `tags`. -- `security_group_id` (string) - The ID (*not* the name) of the security group - to assign to the instance. By default this is not set and Packer will - automatically create a new temporary security group to allow SSH access. Note - that if this is specified, you must be sure the security group allows access - to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. 
+ Note that if this is specified, you must be sure the security group allows + access to the `ssh_port` given below. -- `security_group_ids` (array of strings) - A list of security groups as - described above. Note that if this is specified, you must omit the - `security_group_id`. +- `security_group_ids` (array of strings) - A list of security groups as + described above. Note that if this is specified, you must omit the + `security_group_id`. -- `spot_price` (string) - The maximum hourly price to pay for a spot instance to - create the AMI. Spot instances are a type of instance that EC2 starts when the - current spot price is less than the maximum price you specify. Spot price will - be updated based on available spot instance capacity and current spot - instance requests. It may save you some costs. You can set this to "auto" for - Packer to automatically discover the best spot price. +- `spot_price` (string) - The maximum hourly price to pay for a spot instance + to create the AMI. Spot instances are a type of instance that EC2 starts + when the current spot price is less than the maximum price you specify. Spot + price will be updated based on available spot instance capacity and current + spot instance requests. It may save you some costs. You can set this to + "auto" for Packer to automatically discover the best spot price. -- `spot_price_auto_product` (string) - Required if `spot_price` is set - to "auto". This tells Packer what sort of AMI you're launching to find the - best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, - `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +- `spot_price_auto_product` (string) - Required if `spot_price` is set + to "auto". This tells Packer what sort of AMI you're launching to find the + best spot price. 
This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, + `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` -- `ssh_keypair_name` (string) - If specified, this is the key that will be used - for SSH with the machine. By default, this is blank, and Packer will generate - a temporary keypair. `ssh_private_key_file` must be specified with this. +- `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. -- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP - if available. +- `ssh_private_ip` (boolean) - If true, then SSH will always use the private + IP if available. -- `subnet_id` (string) - If using VPC, the ID of the subnet, such as - "subnet-12345def", where Packer will launch the EC2 instance. This field is - required if you are using an non-default VPC. +- `subnet_id` (string) - If using VPC, the ID of the subnet, such as + "subnet-12345def", where Packer will launch the EC2 instance. This field is + required if you are using an non-default VPC. -- `tags` (object of key/value strings) - Tags applied to the AMI and - relevant snapshots. +- `tags` (object of key/value strings) - Tags applied to the AMI and + relevant snapshots. -- `temporary_key_pair_name` (string) - The name of the temporary keypair - to generate. By default, Packer generates a name with a UUID. +- `temporary_key_pair_name` (string) - The name of the temporary keypair + to generate. By default, Packer generates a name with a UUID. -- `token` (string) - The access token to use. This is different from the access - key and secret key. If you're not sure what this is, then you probably don't - need it. This will also be read from the `AWS_SECURITY_TOKEN` - environmental variable. +- `token` (string) - The access token to use. 
This is different from the + access key and secret key. If you're not sure what this is, then you + probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN` + environmental variable. -- `user_data` (string) - User data to apply when launching the instance. Note - that you need to be careful about escaping characters due to the templates - being JSON. It is often more convenient to use `user_data_file`, instead. +- `user_data` (string) - User data to apply when launching the instance. Note + that you need to be careful about escaping characters due to the templates + being JSON. It is often more convenient to use `user_data_file`, instead. -- `user_data_file` (string) - Path to a file that will be used for the user data - when launching the instance. +- `user_data_file` (string) - Path to a file that will be used for the user + data when launching the instance. -- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in - order to create a temporary security group within the VPC. +- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID + in order to create a temporary security group within the VPC. -- `windows_password_timeout` (string) - The timeout for waiting for a Windows - password for Windows instances. Defaults to 20 minutes. Example value: "10m" +- `windows_password_timeout` (string) - The timeout for waiting for a Windows + password for Windows instances. Defaults to 20 minutes. Example value: "10m" ## Basic Example diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 5ff36ccf2..3ba627680 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -45,196 +45,199 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. 
If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -- `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is *not the same* as the access key. You can find your account - ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `instance_type` (string) - The EC2 instance type to use while building the - AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -- `region` (string) - The name of the region, such as "us-east-1", in which to - launch the EC2 instance to create the AMI. 
+- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This - bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -- `secret_key` (string) - The secret key used to communicate with AWS. If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -- `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -- `ssh_username` (string) - The username to use in order to communicate over SSH - to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. -- `x509_cert_path` (string) - The local path to a valid X509 certificate for - your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page in the - AWS console. +- `x509_cert_path` (string) - The local path to a valid X509 certificate for + your AWS account. This is used for bundling the AMI. This X509 certificate + must be registered with your account from the security credentials page in + the AWS console. 
-- `x509_key_path` (string) - The local path to the private key for the X509 - certificate specified by `x509_cert_path`. This is used for bundling the AMI. +- `x509_key_path` (string) - The local path to the private key for the X509 + certificate specified by `x509_cert_path`. This is used for bundling + the AMI. ### Optional: -- `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: +- `ami_block_device_mappings` (array of block device mappings) - Add the block + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) - The device name exposed to the instance (for +- `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) - The virtual device name. See the documentation on +- `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) for more information - - `snapshot_id` (string) - The ID of the snapshot - - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) +- `snapshot_id` (string) - The ID of the snapshot +- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) - The size of the volume, in GiB. Required if not +- `volume_size` (integer) - The size of the volume, in GiB. 
Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) - Indicates whether the EBS volume is +- `delete_on_termination` (boolean) - Indicates whether the EBS volume is deleted on instance termination - - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - - `no_device` (boolean) - Suppresses the specified device included in the +- `encrypted` (boolean) - Indicates whether to encrypt the volume or not +- `no_device` (boolean) - Suppresses the specified device included in the block device mapping of the AMI - - `iops` (integer) - The number of I/O operations per second (IOPS) that the +- `iops` (integer) - The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) for more information -- `ami_description` (string) - The description to set for the resulting AMI(s). - By default this description is empty. +- `ami_description` (string) - The description to set for the + resulting AMI(s). By default this description is empty. -- `ami_groups` (array of strings) - A list of groups that have access to launch - the resulting AMI(s). By default no groups have permission to launch the AMI. - `all` will make the AMI publicly accessible. AWS currently doesn't accept any - value other than "all". +- `ami_groups` (array of strings) - A list of groups that have access to + launch the resulting AMI(s). By default no groups have permission to launch + the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't + accept any value other than "all". -- `ami_product_codes` (array of strings) - A list of product codes to associate - with the AMI. By default no product codes are associated with the AMI. +- `ami_product_codes` (array of strings) - A list of product codes to + associate with the AMI. By default no product codes are associated with + the AMI. 
-- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags - and attributes are copied along with the AMI. AMI copying takes time depending - on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. + Tags and attributes are copied along with the AMI. AMI copying takes time + depending on the size of the AMI, but will generally take many minutes. -- `ami_users` (array of strings) - A list of account IDs that have access to - launch the resulting AMI(s). By default no additional users other than the - user creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -- `ami_virtualization_type` (string) - The type of virtualization for the AMI - you are building. This option is required to register HVM images. Can be - "paravirtual" (default) or "hvm". +- `ami_virtualization_type` (string) - The type of virtualization for the AMI + you are building. This option is required to register HVM images. Can be + "paravirtual" (default) or "hvm". -- `associate_public_ip_address` (boolean) - If using a non-default VPC, public - IP addresses are not provided by default. If this is toggled, your new - instance will get a Public IP. +- `associate_public_ip_address` (boolean) - If using a non-default VPC, public + IP addresses are not provided by default. If this is toggled, your new + instance will get a Public IP. -- `availability_zone` (string) - Destination availability zone to launch - instance in. Leave this empty to allow Amazon to auto-assign. +- `availability_zone` (string) - Destination availability zone to launch + instance in. Leave this empty to allow Amazon to auto-assign. 
-- `bundle_destination` (string) - The directory on the running instance where - the bundled AMI will be saved prior to uploading. By default this is "/tmp". - This directory must exist and be writable. +- `bundle_destination` (string) - The directory on the running instance where + the bundled AMI will be saved prior to uploading. By default this is "/tmp". + This directory must exist and be writable. -- `bundle_prefix` (string) - The prefix for files created from bundling the - root volume. By default this is "image-{{timestamp}}". The `timestamp` - variable should be used to make sure this is unique, otherwise it can collide - with other created AMIs by Packer in your account. +- `bundle_prefix` (string) - The prefix for files created from bundling the + root volume. By default this is "image-{{timestamp}}". The `timestamp` + variable should be used to make sure this is unique, otherwise it can + collide with other created AMIs by Packer in your account. -- `bundle_upload_command` (string) - The command to use to upload the - bundled volume. See the "custom bundle commands" section below for - more information. +- `bundle_upload_command` (string) - The command to use to upload the + bundled volume. See the "custom bundle commands" section below for + more information. -- `bundle_vol_command` (string) - The command to use to bundle the volume. See - the "custom bundle commands" section below for more information. +- `bundle_vol_command` (string) - The command to use to bundle the volume. See + the "custom bundle commands" section below for more information. -- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) - on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS - IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced + networking (SriovNetSupport) on HVM-compatible AMIs. If true, add + `ec2:ModifyInstanceAttribute` to your AWS IAM policy. 
-- `force_deregister` (boolean) - Force Packer to first deregister an existing - AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -- `iam_instance_profile` (string) - The name of an [IAM instance - profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) - to launch the EC2 instance with. +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) + to launch the EC2 instance with. -- `launch_block_device_mappings` (array of block device mappings) - Add the - block device mappings to the launch instance. The block device mappings are - the same as `ami_block_device_mappings` above. +- `launch_block_device_mappings` (array of block device mappings) - Add the + block device mappings to the launch instance. The block device mappings are + the same as `ami_block_device_mappings` above. -- `run_tags` (object of key/value strings) - Tags to apply to the instance that - is *launched* to create the AMI. These tags are *not* applied to the resulting - AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance + that is *launched* to create the AMI. These tags are *not* applied to the + resulting AMI unless they're duplicated in `tags`. -- `security_group_id` (string) - The ID (*not* the name) of the security group - to assign to the instance. By default this is not set and Packer will - automatically create a new temporary security group to allow SSH access. Note - that if this is specified, you must be sure the security group allows access - to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. 
By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. + Note that if this is specified, you must be sure the security group allows + access to the `ssh_port` given below. -- `security_group_ids` (array of strings) - A list of security groups as - described above. Note that if this is specified, you must omit the - `security_group_id`. +- `security_group_ids` (array of strings) - A list of security groups as + described above. Note that if this is specified, you must omit the + `security_group_id`. -- `spot_price` (string) - The maximum hourly price to launch a spot instance to - create the AMI. It is a type of instances that EC2 starts when the maximum - price that you specify exceeds the current spot price. Spot price will be - updated based on available spot instance capacity and current spot - Instance requests. It may save you some costs. You can set this to "auto" for - Packer to automatically discover the best spot price. +- `spot_price` (string) - The maximum hourly price to launch a spot instance + to create the AMI. It is a type of instances that EC2 starts when the + maximum price that you specify exceeds the current spot price. Spot price + will be updated based on available spot instance capacity and current spot + Instance requests. It may save you some costs. You can set this to "auto" + for Packer to automatically discover the best spot price. -- `spot_price_auto_product` (string) - Required if `spot_price` is set - to "auto". This tells Packer what sort of AMI you're launching to find the - best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, - `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +- `spot_price_auto_product` (string) - Required if `spot_price` is set + to "auto". This tells Packer what sort of AMI you're launching to find the + best spot price. 
This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, + `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` -- `ssh_keypair_name` (string) - If specified, this is the key that will be used - for SSH with the machine. By default, this is blank, and Packer will generate - a temporary keypair. `ssh_private_key_file` must be specified with this. +- `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. -- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP - if available. +- `ssh_private_ip` (boolean) - If true, then SSH will always use the private + IP if available. -- `subnet_id` (string) - If using VPC, the ID of the subnet, such as - "subnet-12345def", where Packer will launch the EC2 instance. This field is - required if you are using an non-default VPC. +- `subnet_id` (string) - If using VPC, the ID of the subnet, such as + "subnet-12345def", where Packer will launch the EC2 instance. This field is + required if you are using an non-default VPC. -- `tags` (object of key/value strings) - Tags applied to the AMI. +- `tags` (object of key/value strings) - Tags applied to the AMI. -- `temporary_key_pair_name` (string) - The name of the temporary keypair - to generate. By default, Packer generates a name with a UUID. +- `temporary_key_pair_name` (string) - The name of the temporary keypair + to generate. By default, Packer generates a name with a UUID. -- `user_data` (string) - User data to apply when launching the instance. Note - that you need to be careful about escaping characters due to the templates - being JSON. It is often more convenient to use `user_data_file`, instead. +- `user_data` (string) - User data to apply when launching the instance. 
Note + that you need to be careful about escaping characters due to the templates + being JSON. It is often more convenient to use `user_data_file`, instead. -- `user_data_file` (string) - Path to a file that will be used for the user data - when launching the instance. +- `user_data_file` (string) - Path to a file that will be used for the user + data when launching the instance. -- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in - order to create a temporary security group within the VPC. +- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID + in order to create a temporary security group within the VPC. -- `x509_upload_path` (string) - The path on the remote machine where the X509 - certificate will be uploaded. This path must already exist and be writable. - X509 certificates are uploaded after provisioning is run, so it is perfectly - okay to create this directory as part of the provisioning process. +- `x509_upload_path` (string) - The path on the remote machine where the X509 + certificate will be uploaded. This path must already exist and be writable. + X509 certificates are uploaded after provisioning is run, so it is perfectly + okay to create this directory as part of the provisioning process. -- `windows_password_timeout` (string) - The timeout for waiting for a Windows - password for Windows instances. Defaults to 20 minutes. Example value: "10m" +- `windows_password_timeout` (string) - The timeout for waiting for a Windows + password for Windows instances. Defaults to 20 minutes. Example value: "10m" ## Basic Example diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 69b4e509b..b96bfba32 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -12,20 +12,21 @@ Packer is able to create Amazon AMIs. 
To achieve this, Packer comes with multiple builders depending on the strategy you want to use to build the AMI. Packer supports the following builders at the moment: -- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by - launching a source AMI and re-packaging it into a new AMI after provisioning. - If in doubt, use this builder, which is the easiest to get started with. +- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by + launching a source AMI and re-packaging it into a new AMI + after provisioning. If in doubt, use this builder, which is the easiest to + get started with. -- [amazon-instance](/docs/builders/amazon-instance.html) - Create instance-store - AMIs by launching and provisioning a source instance, then rebundling it and - uploading it to S3. +- [amazon-instance](/docs/builders/amazon-instance.html) - Create + instance-store AMIs by launching and provisioning a source instance, then + rebundling it and uploading it to S3. -- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs - from an existing EC2 instance by mounting the root device and using a - [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision - that device. This is an **advanced builder and should not be used by - newcomers**. However, it is also the fastest way to build an EBS-backed AMI - since no new EC2 instance needs to be launched. +- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs + from an existing EC2 instance by mounting the root device and using a + [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision + that device. This is an **advanced builder and should not be used by + newcomers**. However, it is also the fastest way to build an EBS-backed AMI + since no new EC2 instance needs to be launched. -> **Don't know which builder to use?** If in doubt, use the [amazon-ebs builder](/docs/builders/amazon-ebs.html). 
It is much easier to use and Amazon diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index b20523944..b5657ce9d 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -34,41 +34,43 @@ builder. ### Required: -- `api_token` (string) - The client TOKEN to use to access your account. It can - also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. +- `api_token` (string) - The client TOKEN to use to access your account. It + can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, + if set. -- `image` (string) - The name (or slug) of the base image to use. This is the - image that will be used to launch a new droplet and provision it. See - https://developers.digitalocean.com/documentation/v2/\#list-all-images for - details on how to get a list of the the accepted image names/slugs. +- `image` (string) - The name (or slug) of the base image to use. This is the + image that will be used to launch a new droplet and provision it. See + https://developers.digitalocean.com/documentation/v2/\#list-all-images for + details on how to get a list of the accepted image names/slugs. -- `region` (string) - The name (or slug) of the region to launch the droplet in. - Consequently, this is the region where the snapshot will be available. See - https://developers.digitalocean.com/documentation/v2/\#list-all-regions for - the accepted region names/slugs. +- `region` (string) - The name (or slug) of the region to launch the + droplet in. Consequently, this is the region where the snapshot will + be available. See + https://developers.digitalocean.com/documentation/v2/\#list-all-regions for + the accepted region names/slugs. -- `size` (string) - The name (or slug) of the droplet size to use.
See - https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for the - accepted size names/slugs. +- `size` (string) - The name (or slug) of the droplet size to use. See + https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for + the accepted size names/slugs. ### Optional: -- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean sets - the hostname of the machine to this value. +- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean + sets the hostname of the machine to this value. -- `private_networking` (boolean) - Set to `true` to enable private networking - for the droplet being created. This defaults to `false`, or not enabled. +- `private_networking` (boolean) - Set to `true` to enable private networking + for the droplet being created. This defaults to `false`, or not enabled. -- `snapshot_name` (string) - The name of the resulting snapshot that will appear - in your account. This must be unique. To help make this unique, use a function - like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `snapshot_name` (string) - The name of the resulting snapshot that will + appear in your account. This must be unique. To help make this unique, use a + function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `state_timeout` (string) - The time to wait, as a duration string, for a - droplet to enter a desired state (such as "active") before timing out. The - default state timeout is "6m". +- `state_timeout` (string) - The time to wait, as a duration string, for a + droplet to enter a desired state (such as "active") before timing out. The + default state timeout is "6m". -- `user_data` (string) - User data to launch with the Droplet. +- `user_data` (string) - User data to launch with the Droplet. 
## Basic Example diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index b2fab5b19..76b1d4057 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -68,42 +68,42 @@ builder. ### Required: -- `commit` (boolean) - If true, the container will be committed to an image - rather than exported. This cannot be set if `export_path` is set. +- `commit` (boolean) - If true, the container will be committed to an image + rather than exported. This cannot be set if `export_path` is set. -- `export_path` (string) - The path where the final container will be exported - as a tar file. This cannot be set if `commit` is set to true. +- `export_path` (string) - The path where the final container will be exported + as a tar file. This cannot be set if `commit` is set to true. -- `image` (string) - The base image for the Docker container that will - be started. This image will be pulled from the Docker registry if it doesn't - already exist. +- `image` (string) - The base image for the Docker container that will + be started. This image will be pulled from the Docker registry if it doesn't + already exist. ### Optional: -- `login` (boolean) - Defaults to false. If true, the builder will login in - order to pull the image. The builder only logs in for the duration of - the pull. It always logs out afterwards. +- `login` (boolean) - Defaults to false. If true, the builder will login in + order to pull the image. The builder only logs in for the duration of + the pull. It always logs out afterwards. -- `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use to authenticate to login. -- `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use to authenticate to login. 
-- `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use to authenticate to login. -- `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to login to. -- `pull` (boolean) - If true, the configured image will be pulled using - `docker pull` prior to use. Otherwise, it is assumed the image already exists - and can be used. This defaults to true if not set. +- `pull` (boolean) - If true, the configured image will be pulled using + `docker pull` prior to use. Otherwise, it is assumed the image already + exists and can be used. This defaults to true if not set. -- `run_command` (array of strings) - An array of arguments to pass to - `docker run` in order to run the container. By default this is set to - `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a - couple template variables to customize, as well. +- `run_command` (array of strings) - An array of arguments to pass to + `docker run` in order to run the container. By default this is set to + `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a + couple template variables to customize, as well. -- `volumes` (map of strings to strings) - A mapping of additional volumes to - mount into this container. The key of the object is the host path, the value - is the container path. +- `volumes` (map of strings to strings) - A mapping of additional volumes to + mount into this container. The key of the object is the host path, the value + is the container path. ## Using the Artifact: Export @@ -226,11 +226,11 @@ Dockerfiles have some additional features that Packer doesn't support which are able to be worked around. Many of these features will be automated by Packer in the future: -- Dockerfiles will snapshot the container at each step, allowing you to go back - to any step in the history of building. 
Packer doesn't do this yet, but - inter-step snapshotting is on the way. +- Dockerfiles will snapshot the container at each step, allowing you to go + back to any step in the history of building. Packer doesn't do this yet, but + inter-step snapshotting is on the way. -- Dockerfiles can contain information such as exposed ports, shared volumes, and - other metadata. Packer builds a raw Docker container image that has none of - this metadata. You can pass in much of this metadata at runtime with - `docker run`. +- Dockerfiles can contain information such as exposed ports, shared volumes, + and other metadata. Packer builds a raw Docker container image that has none + of this metadata. You can pass in much of this metadata at runtime with + `docker run`. diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index 409275c7b..01eb3c7e1 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -38,67 +38,67 @@ builder. ### Required: -- `flavor` (string) - The ID, name, or full URL for the desired flavor for the - server to be created. +- `flavor` (string) - The ID, name, or full URL for the desired flavor for the + server to be created. -- `image_name` (string) - The name of the resulting image. +- `image_name` (string) - The name of the resulting image. -- `source_image` (string) - The ID or full URL to the base image to use. This is - the image that will be used to launch a new server and provision it. Unless - you specify completely custom SSH settings, the source image must have - `cloud-init` installed so that the keypair gets assigned properly. +- `source_image` (string) - The ID or full URL to the base image to use. This + is the image that will be used to launch a new server and provision it. 
+ Unless you specify completely custom SSH settings, the source image must + have `cloud-init` installed so that the keypair gets assigned properly. -- `username` (string) - The username used to connect to the OpenStack service. - If not specified, Packer will use the environment variable `OS_USERNAME`, - if set. +- `username` (string) - The username used to connect to the OpenStack service. + If not specified, Packer will use the environment variable `OS_USERNAME`, + if set. -- `password` (string) - The password used to connect to the OpenStack service. - If not specified, Packer will use the environment variables `OS_PASSWORD`, - if set. +- `password` (string) - The password used to connect to the OpenStack service. + If not specified, Packer will use the environment variable `OS_PASSWORD`, + if set. ### Optional: -- `api_key` (string) - The API key used to access OpenStack. Some OpenStack - installations require this. +- `api_key` (string) - The API key used to access OpenStack. Some OpenStack + installations require this. -- `availability_zone` (string) - The availability zone to launch the server in. - If this isn't specified, the default enforced by your OpenStack cluster will - be used. This may be required for some OpenStack clusters. +- `availability_zone` (string) - The availability zone to launch the + server in. If this isn't specified, the default enforced by your OpenStack + cluster will be used. This may be required for some OpenStack clusters. -- `floating_ip` (string) - A specific floating IP to assign to this instance. - `use_floating_ip` must also be set to true for this to have an affect. +- `floating_ip` (string) - A specific floating IP to assign to this instance. + `use_floating_ip` must also be set to true for this to have an effect. -- `floating_ip_pool` (string) - The name of the floating IP pool to use to - allocate a floating IP. `use_floating_ip` must also be set to true for this to - have an affect.
+- `floating_ip_pool` (string) - The name of the floating IP pool to use to + allocate a floating IP. `use_floating_ip` must also be set to true for this + to have an effect. -- `insecure` (boolean) - Whether or not the connection to OpenStack can be done - over an insecure connection. By default this is false. +- `insecure` (boolean) - Whether or not the connection to OpenStack can be + done over an insecure connection. By default this is false. -- `networks` (array of strings) - A list of networks by UUID to attach to - this instance. +- `networks` (array of strings) - A list of networks by UUID to attach to + this instance. -- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the - instance into. Some OpenStack installations require this. If not specified, - Packer will use the environment variable `OS_TENANT_NAME`, if set. +- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the + instance into. Some OpenStack installations require this. If not specified, + Packer will use the environment variable `OS_TENANT_NAME`, if set. -- `security_groups` (array of strings) - A list of security groups by name to - add to this instance. +- `security_groups` (array of strings) - A list of security groups by name to + add to this instance. -- `region` (string) - The name of the region, such as "DFW", in which to launch - the server to create the AMI. If not specified, Packer will use the - environment variable `OS_REGION_NAME`, if set. +- `region` (string) - The name of the region, such as "DFW", in which to + launch the server to create the AMI. If not specified, Packer will use the + environment variable `OS_REGION_NAME`, if set. -- `ssh_interface` (string) - The type of interface to connect via SSH. Values - useful for Rackspace are "public" or "private", and the default behavior is to - connect via whichever is returned first from the OpenStack API. +- `ssh_interface` (string) - The type of interface to connect via SSH.
Values + useful for Rackspace are "public" or "private", and the default behavior is + to connect via whichever is returned first from the OpenStack API. -- `use_floating_ip` (boolean) - Whether or not to use a floating IP for - the instance. Defaults to false. +- `use_floating_ip` (boolean) - Whether or not to use a floating IP for + the instance. Defaults to false. -- `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for - Rackconnect to assign the machine an IP address before connecting via SSH. - Defaults to false. +- `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for + Rackconnect to assign the machine an IP address before connecting via SSH. + Defaults to false. ## Basic Example: Rackspace public cloud @@ -138,7 +138,7 @@ appear in the template. That is because I source a standard OpenStack script with environment variables set before I run this. This script is setting environment variables like: -- `OS_AUTH_URL` -- `OS_TENANT_ID` -- `OS_USERNAME` -- `OS_PASSWORD` +- `OS_AUTH_URL` +- `OS_TENANT_ID` +- `OS_USERNAME` +- `OS_PASSWORD` diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index d89b5394f..76278ec2b 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -56,146 +56,149 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. 
-- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or - "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files are generally large and corruption does happen - from time to time. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or + "sha512" currently. While "none" will skip checksumming, this is not + recommended since ISO files are generally large and corruption does happen + from time to time. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). If this is an - HTTP URL, Packer will download it and cache it between runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). If + this is an HTTP URL, Packer will download it and cache it between runs. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. -- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to - install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". - This can be omitted only if `parallels_tools_mode` is "disable". +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to + install into the VM. Valid values are "win", "lin", "mac", "os2" + and "other". This can be omitted only if `parallels_tools_mode` + is "disable". ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. 
Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for - the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create + for the VM. By default, this is 40000 (about 40 GB). -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. 
Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `guest_os_type` (string) - The guest OS type being installed. By default this - is "other", but you can get *dramatic* performance improvements by setting - this to the proper value. To view all available values for this run - `prlctl create x --distribution list`. Setting the correct value hints to - Parallels Desktop how to optimize the virtual hardware to work best with that - operating system. +- `guest_os_type` (string) - The guest OS type being installed. By default + this is "other", but you can get *dramatic* performance improvements by + setting this to the proper value. To view all available values for this run + `prlctl create x --distribution list`. Setting the correct value hints to + Parallels Desktop how to optimize the virtual hardware to work best with + that operating system. -- `hard_drive_interface` (string) - The type of controller that the hard drives - are attached to, defaults to "sata". Valid options are "sata", "ide", - and "scsi". +- `hard_drive_interface` (string) - The type of controller that the hard + drives are attached to, defaults to "sata". 
Valid options are "sata", "ide", + and "scsi". -- `host_interfaces` (array of strings) - A list of which interfaces on the host - should be searched for a IP address. The first IP address found on one of - these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to - \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9", - "ppp0", "ppp1", "ppp2"\]. +- `host_interfaces` (array of strings) - A list of which interfaces on the + host should be searched for an IP address. The first IP address found on one + of these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to + \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9", + "ppp0", "ppp1", "ppp2"\]. -- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same.
By - default the values are 8000 and 9000, respectively. +- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer - will try these in order. If anything goes wrong attempting to download or - while downloading a single URL, it will move on to the next. All URLs must - point to the same file (same checksum). By default this is empty and `iso_url` - is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. + Packer will try these in order. If anything goes wrong attempting to + download or while downloading a single URL, it will move on to the next. All + URLs must point to the same file (same checksum). By default this is empty + and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. +- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. 
By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `parallels_tools_guest_path` (string) - The path in the virtual machine to - upload Parallels Tools. This only takes effect if `parallels_tools_mode` - is "upload". This is a [configuration - template](/docs/templates/configuration-templates.html) that has a single - valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`. - By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the - login directory of the user. +- `parallels_tools_guest_path` (string) - The path in the virtual machine to + upload Parallels Tools. This only takes effect if `parallels_tools_mode` + is "upload". This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of + `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" + which should upload into the login directory of the user. -- `parallels_tools_mode` (string) - The method by which Parallels Tools are made - available to the guest for installation. Valid options are "upload", "attach", - or "disable". If the mode is "attach" the Parallels Tools ISO will be attached - as a CD device to the virtual machine. If the mode is "upload" the Parallels - Tools ISO will be uploaded to the path specified by - `parallels_tools_guest_path`. The default value is "upload". +- `parallels_tools_mode` (string) - The method by which Parallels Tools are + made available to the guest for installation. Valid options are "upload", + "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will + be attached as a CD device to the virtual machine. If the mode is "upload" + the Parallels Tools ISO will be uploaded to the path specified by + `parallels_tools_guest_path`. The default value is "upload". 
-- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in - order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the - order defined in the template. For each command, the command is defined itself - as an array of strings, where each string represents a single argument on the - command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated - as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how to - use `prlctl` are below. +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute + in order to further customize the virtual machine being created. The value + of this is an array of commands to execute. The commands are executed in the + order defined in the template. For each command, the command is defined + itself as an array of strings, where each string represents a single + argument on the command-line to `prlctl` (but excluding `prlctl` itself). + Each arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use `prlctl` + are below. -- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that - it is run after the virtual machine is shutdown, and before the virtual - machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except + that it is run after the virtual machine is shutdown, and before the virtual + machine is exported. -- `prlctl_version_file` (string) - The path within the virtual machine to upload - a file that contains the `prlctl` version that was used to create the machine. - This information can be useful for provisioning. By default this is - ".prlctl\_version", which will generally upload it into the home directory. 
+- `prlctl_version_file` (string) - The path within the virtual machine to + upload a file that contains the `prlctl` version that was used to create + the machine. This information can be useful for provisioning. By default + this is ".prlctl\_version", which will generally upload it into the + home directory. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `vm_name` (string) - This is the name of the PVM directory for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the PVM directory for the new + virtual machine, without the file extension. By default this is + "packer-BUILDNAME", where "BUILDNAME" is the name of the build. ## Boot Command @@ -214,40 +217,40 @@ simulating a human actually typing the keyboard. There are a set of special keys available. 
If these are in your boot command, they will be replaced by the proper key: -- `` - Backspace +- `` - Backspace -- `` - Delete +- `` - Delete -- `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -- `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -- `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -- `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -- `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -- `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -- `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -- `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -- `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -- `` `` `` - Adds a 1, 5 or 10 second pause before sending - any additional keys. This is useful if you have to generally wait for the UI - to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before + sending any additional keys. This is useful if you have to generally wait + for the UI to update before typing more. In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are: -- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that - is started serving the directory specified by the `http_directory` - configuration parameter. If `http_directory` isn't specified, these will be - blank! +- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server + that is started serving the directory specified by the `http_directory` + configuration parameter. 
If `http_directory` isn't specified, these will be + blank! Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index f4f9f352c..ce13f2c19 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -53,96 +53,99 @@ builder. ### Required: -- `source_path` (string) - The path to a PVM directory that acts as the source - of this build. +- `source_path` (string) - The path to a PVM directory that acts as the source + of this build. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. -- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to - install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". - This can be omitted only if `parallels_tools_mode` is "disable". +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to + install into the VM. Valid values are "win", "lin", "mac", "os2" + and "other". This can be omitted only if `parallels_tools_mode` + is "disable". ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. 
Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `floppy_files` (array of strings) - A list of files to put onto a floppy disk - that is attached when the VM is booted for the first time. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default no floppy will be attached. The files listed in - this configuration will all be put into the root directory of the floppy disk; - sub-directories are not supported. +- `floppy_files` (array of strings) - A list of files to put onto a floppy + disk that is attached when the VM is booted for the first time. This is most + useful for unattended Windows installs, which look for an `Autounattend.xml` + file on removable media. By default no floppy will be attached. The files + listed in this configuration will all be put into the root directory of the + floppy disk; sub-directories are not supported. -- `reassign_mac` (boolean) - If this is "false" the MAC address of the first NIC - will reused when imported else a new MAC address will be generated - by Parallels. Defaults to "false". 
+- `reassign_mac` (boolean) - If this is "false" the MAC address of the first
+  NIC will be reused when imported else a new MAC address will be generated
+  by Parallels. Defaults to "false".

-- `output_directory` (string) - This is the path to the directory where the
-  resulting virtual machine will be created. This may be relative or absolute.
-  If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running
-  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
-  name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+  resulting virtual machine will be created. This may be relative or absolute.
+  If relative, the path is relative to the working directory when `packer`
+  is executed. This directory must not exist or be empty prior to running
+  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+  name of the build.

-- `parallels_tools_guest_path` (string) - The path in the VM to upload
-  Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload".
-  This is a [configuration
-  template](/docs/templates/configuration-templates.html) that has a single
-  valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`.
-  By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the
-  login directory of the user.
+- `parallels_tools_guest_path` (string) - The path in the VM to upload
+  Parallels Tools. This only takes effect if `parallels_tools_mode`
+  is "upload". This is a [configuration
+  template](/docs/templates/configuration-templates.html) that has a single
+  valid variable: `Flavor`, which will be the value of
+  `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso"
+  which should upload into the login directory of the user.
-- `parallels_tools_mode` (string) - The method by which Parallels Tools are made - available to the guest for installation. Valid options are "upload", "attach", - or "disable". If the mode is "attach" the Parallels Tools ISO will be attached - as a CD device to the virtual machine. If the mode is "upload" the Parallels - Tools ISO will be uploaded to the path specified by - `parallels_tools_guest_path`. The default value is "upload". +- `parallels_tools_mode` (string) - The method by which Parallels Tools are + made available to the guest for installation. Valid options are "upload", + "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will + be attached as a CD device to the virtual machine. If the mode is "upload" + the Parallels Tools ISO will be uploaded to the path specified by + `parallels_tools_guest_path`. The default value is "upload". -- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in - order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the - order defined in the template. For each command, the command is defined itself - as an array of strings, where each string represents a single argument on the - command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated - as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how to - use `prlctl` are below. +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute + in order to further customize the virtual machine being created. The value + of this is an array of commands to execute. The commands are executed in the + order defined in the template. For each command, the command is defined + itself as an array of strings, where each string represents a single + argument on the command-line to `prlctl` (but excluding `prlctl` itself). 
+ Each arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use `prlctl` + are below. -- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that - it is run after the virtual machine is shutdown, and before the virtual - machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except + that it is run after the virtual machine is shutdown, and before the virtual + machine is exported. -- `prlctl_version_file` (string) - The path within the virtual machine to upload - a file that contains the `prlctl` version that was used to create the machine. - This information can be useful for provisioning. By default this is - ".prlctl\_version", which will generally upload it into the home directory. +- `prlctl_version_file` (string) - The path within the virtual machine to + upload a file that contains the `prlctl` version that was used to create + the machine. This information can be useful for provisioning. By default + this is ".prlctl\_version", which will generally upload it into the + home directory. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. 
+- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `vm_name` (string) - This is the name of the virtual machine when it is - imported as well as the name of the PVM directory when the virtual machine - is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the - name of the build. +- `vm_name` (string) - This is the name of the virtual machine when it is + imported as well as the name of the PVM directory when the virtual machine + is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the + name of the build. ## Parallels Tools @@ -168,31 +171,31 @@ simulating a human actually typing the keyboard. There are a set of special keys available. If these are in your boot command, they will be replaced by the proper key: -- `` - Backspace +- `` - Backspace -- `` - Delete +- `` - Delete -- `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -- `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -- `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -- `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -- `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -- `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -- `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -- `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -- `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. 
-- `` `` `` - Adds a 1, 5 or 10 second pause before sending - any additional keys. This is useful if you have to generally wait for the UI - to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before + sending any additional keys. This is useful if you have to generally wait + for the UI to update before typing more. In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The diff --git a/website/source/docs/builders/parallels.html.markdown b/website/source/docs/builders/parallels.html.markdown index 7d355eaef..582f8e0af 100644 --- a/website/source/docs/builders/parallels.html.markdown +++ b/website/source/docs/builders/parallels.html.markdown @@ -16,16 +16,16 @@ Packer actually comes with multiple builders able to create Parallels machines, depending on the strategy you want to use to build the image. Packer supports the following Parallels builders: -- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO file, - creates a brand new Parallels VM, installs an OS, provisions software within - the OS, then exports that machine to create an image. This is best for people - who want to start from scratch. +- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO + file, creates a brand new Parallels VM, installs an OS, provisions software + within the OS, then exports that machine to create an image. This is best + for people who want to start from scratch. -- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an - existing PVM file, runs provisioners on top of that VM, and exports that - machine to create an image. This is best if you have an existing Parallels VM - export you want to use as the source. As an additional benefit, you can feed - the artifact of this builder back into itself to iterate on a machine. 
+- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an + existing PVM file, runs provisioners on top of that VM, and exports that + machine to create an image. This is best if you have an existing Parallels + VM export you want to use as the source. As an additional benefit, you can + feed the artifact of this builder back into itself to iterate on a machine. ## Requirements diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 57c53e4c0..651c69122 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -81,124 +81,124 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. -- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "md5", "sha1", "sha256", or - "sha512" currently. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "md5", "sha1", "sha256", or + "sha512" currently. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). If this is an - HTTP URL, Packer will download it and cache it between runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). 
If
+  this is an HTTP URL, Packer will download it and cache it between runs.

-- `ssh_username` (string) - The username to use to SSH into the machine once the
-  OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once
+  the OS is installed.

### Optional:

-- `accelerator` (string) - The accelerator type to use when running the VM. This
-  may have a value of either "none", "kvm", "tcg", or "xen" and you must have
-  that support in on the machine on which you run the builder. By default "kvm"
-  is used.
+- `accelerator` (string) - The accelerator type to use when running the VM.
+  This may have a value of either "none", "kvm", "tcg", or "xen" and you must
+  have that support on the machine on which you run the builder. By default
+  "kvm" is used.

-- `boot_command` (array of strings) - This is an array of commands to type when
-  the virtual machine is first booted. The goal of these commands should be to
-  type just enough to initialize the operating system installer. Special keys
-  can be typed as well, and are covered in the section below on the
-  boot command. If this is not specified, it is assumed the installer will
-  start itself.
+- `boot_command` (array of strings) - This is an array of commands to type
+  when the virtual machine is first booted. The goal of these commands should
+  be to type just enough to initialize the operating system installer. Special
+  keys can be typed as well, and are covered in the section below on the
+  boot command. If this is not specified, it is assumed the installer will
+  start itself.

-- `boot_wait` (string) - The time to wait after booting the initial virtual
-  machine before typing the `boot_command`. The value of this should be
-  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
-  seconds and one minute 30 seconds, respectively. If this isn't specified, the
-  default is 10 seconds.
+- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_cache` (string) - The cache mode to use for disk. Allowed values include - any of "writethrough", "writeback", "none", "unsafe" or "directsync". By - default, this is set to "writeback". +- `disk_cache` (string) - The cache mode to use for disk. Allowed values + include any of "writethrough", "writeback", "none", "unsafe" + or "directsync". By default, this is set to "writeback". -- `disk_discard` (string) - The discard mode to use for disk. Allowed values - include any of "unmap" or "ignore". By default, this is set to "ignore". +- `disk_discard` (string) - The discard mode to use for disk. Allowed values + include any of "unmap" or "ignore". By default, this is set to "ignore". -- `disk_image` (boolean) - Packer defaults to building from an ISO file, this - parameter controls whether the ISO URL supplied is actually a bootable - QEMU image. When this value is set to true, the machine will clone the source, - resize it according to `disk_size` and boot the image. +- `disk_image` (boolean) - Packer defaults to building from an ISO file, this + parameter controls whether the ISO URL supplied is actually a bootable + QEMU image. When this value is set to true, the machine will clone the + source, resize it according to `disk_size` and boot the image. -- `disk_interface` (string) - The interface to use for the disk. Allowed values - include any of "ide," "scsi" or "virtio." Note also that any boot commands or - kickstart type scripts must have proper adjustments for resulting - device names. The Qemu builder uses "virtio" by default. +- `disk_interface` (string) - The interface to use for the disk. 
Allowed + values include any of "ide," "scsi" or "virtio." Note also that any boot + commands or kickstart type scripts must have proper adjustments for + resulting device names. The Qemu builder uses "virtio" by default. -- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for - the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create + for the VM. By default, this is 40000 (about 40 GB). -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `format` (string) - Either "qcow2" or "raw", this specifies the output format - of the virtual machine image. This defaults to "qcow2". 
+- `format` (string) - Either "qcow2" or "raw", this specifies the output + format of the virtual machine image. This defaults to "qcow2". -- `headless` (boolean) - Packer defaults to building QEMU virtual machines by - launching a GUI that shows the console of the machine being built. When this - value is set to true, the machine will start without a console. +- `headless` (boolean) - Packer defaults to building QEMU virtual machines by + launching a GUI that shows the console of the machine being built. When this + value is set to true, the machine will start without a console. -- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. By - default the values are 8000 and 9000, respectively. 
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer - will try these in order. If anything goes wrong attempting to download or - while downloading a single URL, it will move on to the next. All URLs must - point to the same file (same checksum). By default this is empty and `iso_url` - is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. + Packer will try these in order. If anything goes wrong attempting to + download or while downloading a single URL, it will move on to the next. All + URLs must point to the same file (same checksum). By default this is empty + and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `machine_type` (string) - The type of machine emulation to use. Run your qemu - binary with the flags `-machine help` to list available types for your system. - This defaults to "pc". +- `machine_type` (string) - The type of machine emulation to use. Run your + qemu binary with the flags `-machine help` to list available types for + your system. This defaults to "pc". -- `net_device` (string) - The driver to use for the network interface. Allowed - values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," - "pcnet" or "virtio." The Qemu builder uses "virtio" by default. +- `net_device` (string) - The driver to use for the network interface. Allowed + values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," + "pcnet" or "virtio." 
The Qemu builder uses "virtio" by default. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. +- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `qemu_binary` (string) - The name of the Qemu binary to look for. This - defaults to "qemu-system-x86\_64", but may need to be changed for - some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better - choice for some systems. +- `qemu_binary` (string) - The name of the Qemu binary to look for. This + defaults to "qemu-system-x86\_64", but may need to be changed for + some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a + better choice for some systems. -- `qemuargs` (array of array of strings) - Allows complete control over the qemu - command line (though not, at this time, qemu-img). Each array of strings makes - up a command line switch that overrides matching default switch/value pairs. - Any value specified as an empty string is ignored. All values after the switch - are concatenated with no separator. +- `qemuargs` (array of array of strings) - Allows complete control over the + qemu command line (though not, at this time, qemu-img). Each array of + strings makes up a command line switch that overrides matching default + switch/value pairs. 
Any value specified as an empty string is ignored. All + values after the switch are concatenated with no separator. \~> **Warning:** The qemu command line allows extreme flexibility, so beware of conflicting arguments causing failures of your run. For instance, using @@ -207,7 +207,7 @@ shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see the defaults, look in the packer.log file and search for the qemu-system-x86 command. The arguments are all printed for review. - The following shows a sample usage: +The following shows a sample usage: ``` {.javascript} // ... @@ -225,34 +225,35 @@ command. The arguments are all printed for review. // ... ``` - would produce the following (not including other defaults supplied by the builder and not otherwise conflicting with the qemuargs): +would produce the following (not including other defaults supplied by the +builder and not otherwise conflicting with the qemuargs):
       qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
     
    -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded to - the SSH port on the guest machine. Because Packer often runs in parallel, - Packer will choose a randomly available port in this range to use as the - host port. +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded + to the SSH port on the guest machine. Because Packer often runs in parallel, + Packer will choose a randomly available port in this range to use as the + host port. -- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the - new virtual machine, without the file extension. By default this is - "packer-BUILDNAME", where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for + the new virtual machine, without the file extension. 
By default this is + "packer-BUILDNAME", where "BUILDNAME" is the name of the build. -- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to - use for the VNC port on the host machine which is forwarded to the VNC port on - the guest machine. Because Packer often runs in parallel, Packer will choose a - randomly available port in this range to use as the host port. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port + to use for the VNC port on the host machine which is forwarded to the VNC + port on the guest machine. Because Packer often runs in parallel, Packer + will choose a randomly available port in this range to use as the host port. ## Boot Command @@ -270,40 +271,40 @@ machine, simulating a human actually typing the keyboard. There are a set of special keys available. If these are in your boot command, they will be replaced by the proper key: -- `` - Backspace +- `` - Backspace -- `` - Delete +- `` - Delete -- `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -- `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -- `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -- `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -- `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -- `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -- `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -- `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -- `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -- `` `` `` - Adds a 1, 5 or 10 second pause before sending - any additional keys. 
This is useful if you have to generally wait for the UI - to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before + sending any additional keys. This is useful if you have to generally wait + for the UI to update before typing more. In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are: -- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that - is started serving the directory specified by the `http_directory` - configuration parameter. If `http_directory` isn't specified, these will be - blank! +- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server + that is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! Example boot command. This is actually a working boot command used to start an CentOS 6.4 installer: diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index bdccdf768..7df4975dc 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -54,177 +54,179 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. 
-- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or - "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files are generally large and corruption does happen - from time to time. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or + "sha512" currently. While "none" will skip checksumming, this is not + recommended since ISO files are generally large and corruption does happen + from time to time. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). If this is an - HTTP URL, Packer will download it and cache it between runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). If + this is an HTTP URL, Packer will download it and cache it between runs. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. 
Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for - the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create + for the VM. By default, this is 40000 (about 40 GB). -- `export_opts` (array of strings) - Additional options to pass to the - `VBoxManage export`. This can be useful for passing product information to - include in the resulting appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. 
Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `format` (string) - Either "ovf" or "ova", this specifies the output format of - the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format + of the exported virtual machine. This defaults to "ovf". -- `guest_additions_mode` (string) - The method by which guest additions are made - available to the guest for installation. Valid options are "upload", "attach", - or "disable". If the mode is "attach" the guest additions ISO will be attached - as a CD device to the virtual machine. If the mode is "upload" the guest - additions ISO will be uploaded to the path specified by - `guest_additions_path`. The default value is "upload". If "disable" is used, - guest additions won't be downloaded, either. +- `guest_additions_mode` (string) - The method by which guest additions are + made available to the guest for installation. Valid options are "upload", + "attach", or "disable". If the mode is "attach" the guest additions ISO will + be attached as a CD device to the virtual machine. If the mode is "upload" + the guest additions ISO will be uploaded to the path specified by + `guest_additions_path`. 
The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -- `guest_additions_path` (string) - The path on the guest virtual machine where - the VirtualBox guest additions ISO will be uploaded. By default this is - "VBoxGuestAdditions.iso" which should upload into the login directory of - the user. This is a [configuration - template](/docs/templates/configuration-templates.html) where the `Version` - variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine + where the VirtualBox guest additions ISO will be uploaded. By default this + is "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions - ISO that will be uploaded to the guest VM. By default the checksums will be - downloaded from the VirtualBox website, so this only needs to be set if you - want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest + additions ISO that will be uploaded to the guest VM. By default the + checksums will be downloaded from the VirtualBox website, so this only needs + to be set if you want to be explicit about the checksum. -- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. - This can also be a file URL if the ISO is at a local path. By default, the - VirtualBox builder will attempt to find the guest additions ISO on the local - file system. If it is not available locally, the builder will download the - proper guest additions ISO from the internet. +- `guest_additions_url` (string) - The URL to the guest additions ISO + to upload. This can also be a file URL if the ISO is at a local path. 
By + default, the VirtualBox builder will attempt to find the guest additions ISO + on the local file system. If it is not available locally, the builder will + download the proper guest additions ISO from the internet. -- `guest_os_type` (string) - The guest OS type being installed. By default this - is "other", but you can get *dramatic* performance improvements by setting - this to the proper value. To view all available values for this run - `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how - to optimize the virtual hardware to work best with that operating system. +- `guest_os_type` (string) - The guest OS type being installed. By default + this is "other", but you can get *dramatic* performance improvements by + setting this to the proper value. To view all available values for this run + `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how + to optimize the virtual hardware to work best with that operating system. -- `hard_drive_interface` (string) - The type of controller that the primary hard - drive is attached to, defaults to "ide". When set to "sata", the drive is - attached to an AHCI SATA controller. When set to "scsi", the drive is attached - to an LsiLogic SCSI controller. +- `hard_drive_interface` (string) - The type of controller that the primary + hard drive is attached to, defaults to "ide". When set to "sata", the drive + is attached to an AHCI SATA controller. When set to "scsi", the drive is + attached to an LsiLogic SCSI controller. -- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines - by launching a GUI that shows the console of the machine being built. When - this value is set to true, the machine will start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual + machines by launching a GUI that shows the console of the machine + being built. When this value is set to true, the machine will start without + a console. 
-- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. By - default the values are 8000 and 9000, respectively. +- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `iso_interface` (string) - The type of controller that the ISO is attached to, - defaults to "ide". When set to "sata", the drive is attached to an AHCI - SATA controller. 
+- `iso_interface` (string) - The type of controller that the ISO is attached + to, defaults to "ide". When set to "sata", the drive is attached to an AHCI + SATA controller. -- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer - will try these in order. If anything goes wrong attempting to download or - while downloading a single URL, it will move on to the next. All URLs must - point to the same file (same checksum). By default this is empty and `iso_url` - is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. + Packer will try these in order. If anything goes wrong attempting to + download or while downloading a single URL, it will move on to the next. All + URLs must point to the same file (same checksum). By default this is empty + and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. +- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. 
By default this is an empty string, - which tells Packer to just forcefully shut down the machine unless a shutdown - command takes place inside script so this may safely be omitted. If one or - more scripts require a reboot it is suggested to leave this blank since - reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine unless a + shutdown command takes place inside a script so this may safely be omitted. If + one or more scripts require a reboot it is suggested to leave this blank + since reboots may fail and specify the final shutdown command in your + last script. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded to - the SSH port on the guest machine. Because Packer often runs in parallel, - Packer will choose a randomly available port in this range to use as the - host port. +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded + to the SSH port on the guest machine. Because Packer often runs in parallel, + Packer will choose a randomly available port in this range to use as the + host port. 
-- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer - does not setup forwarded port mapping for SSH requests and uses `ssh_port` on - the host to communicate to the virtual machine +- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer + does not setup forwarded port mapping for SSH requests and uses `ssh_port` + on the host to communicate to the virtual machine -- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to - execute in order to further customize the virtual machine being created. The - value of this is an array of commands to execute. The commands are executed in - the order defined in the template. For each command, the command is defined - itself as an array of strings, where each string represents a single argument - on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each - arg is treated as a [configuration - template](/docs/templates/configuration-templates.html), where the `Name` - variable is replaced with the VM name. More details on how to use `VBoxManage` - are below. +- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to + execute in order to further customize the virtual machine being created. The + value of this is an array of commands to execute. The commands are executed + in the order defined in the template. For each command, the command is + defined itself as an array of strings, where each string represents a single + argument on the command-line to `VBoxManage` (but excluding + `VBoxManage` itself). Each arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use + `VBoxManage` are below. -- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. 
+- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. -- `virtualbox_version_file` (string) - The path within the virtual machine to - upload a file that contains the VirtualBox version that was used to create - the machine. This information can be useful for provisioning. By default this - is ".vbox\_version", which will generally be upload it into the - home directory. +- `virtualbox_version_file` (string) - The path within the virtual machine to + upload a file that contains the VirtualBox version that was used to create + the machine. This information can be useful for provisioning. By default + this is ".vbox\_version", which will generally be uploaded into the + home directory. -- `vm_name` (string) - This is the name of the OVF file for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the OVF file for the new virtual + machine, without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. ## Boot Command @@ -242,40 +244,40 @@ machine, simulating a human actually typing the keyboard. There are a set of special keys available. If these are in your boot command, they will be replaced by the proper key: -- `` - Backspace +- `` - Backspace -- `` - Delete +- `` - Delete -- `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -- `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -- `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -- `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -- `` `` `` `` - Simulates pressing an arrow key. 
+- `` `` `` `` - Simulates pressing an arrow key. -- `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -- `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -- `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -- `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -- `` `` `` - Adds a 1, 5 or 10 second pause before sending - any additional keys. This is useful if you have to generally wait for the UI - to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before + sending any additional keys. This is useful if you have to generally wait + for the UI to update before typing more. In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are: -- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that - is started serving the directory specified by the `http_directory` - configuration parameter. If `http_directory` isn't specified, these will be - blank! +- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server + that is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index dcf5dbd5c..b9b2de033 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -19,11 +19,13 @@ image). 
When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this: - ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR - ==> virtualbox-ovf: VBoxManage: error: Appliance read failed - ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 - ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance - ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp +==> virtualbox-ovf: Progress state: VBOX\_E\_FILE\_ERROR ==> +virtualbox-ovf: VBoxManage: error: Appliance read failed ==> virtualbox-ovf: +VBoxManage: error: Error reading "source.ova": element "Section" has no "type" +attribute, line 21 ==> virtualbox-ovf: VBoxManage: error: Details: code +VBOX\_E\_FILE\_ERROR (0x80bb0004), component Appliance, interface IAppliance +==> virtualbox-ovf: VBoxManage: error: Context: "int +handleImportAppliance(HandlerArg\*)" at line 304 of file VBoxManageAppliance.cpp The builder builds a virtual machine by importing an existing OVF or OVA file. It then boots this image, runs provisioners on this new VM, and exports that VM @@ -61,149 +63,151 @@ builder. ### Required: -- `source_path` (string) - The path to an OVF or OVA file that acts as the - source of this build. +- `source_path` (string) - The path to an OVF or OVA file that acts as the + source of this build. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. 
Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `export_opts` (array of strings) - Additional options to pass to the - `VBoxManage export`. This can be useful for passing product information to - include in the resulting appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. 
All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `format` (string) - Either "ovf" or "ova", this specifies the output format of - the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format + of the exported virtual machine. This defaults to "ovf". -- `guest_additions_mode` (string) - The method by which guest additions are made - available to the guest for installation. Valid options are "upload", "attach", - or "disable". If the mode is "attach" the guest additions ISO will be attached - as a CD device to the virtual machine. If the mode is "upload" the guest - additions ISO will be uploaded to the path specified by - `guest_additions_path`. The default value is "upload". If "disable" is used, - guest additions won't be downloaded, either. +- `guest_additions_mode` (string) - The method by which guest additions are + made available to the guest for installation. Valid options are "upload", + "attach", or "disable". 
If the mode is "attach" the guest additions ISO will + be attached as a CD device to the virtual machine. If the mode is "upload" + the guest additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -- `guest_additions_path` (string) - The path on the guest virtual machine where - the VirtualBox guest additions ISO will be uploaded. By default this is - "VBoxGuestAdditions.iso" which should upload into the login directory of - the user. This is a [configuration - template](/docs/templates/configuration-templates.html) where the `Version` - variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine + where the VirtualBox guest additions ISO will be uploaded. By default this + is "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions - ISO that will be uploaded to the guest VM. By default the checksums will be - downloaded from the VirtualBox website, so this only needs to be set if you - want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest + additions ISO that will be uploaded to the guest VM. By default the + checksums will be downloaded from the VirtualBox website, so this only needs + to be set if you want to be explicit about the checksum. -- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. - This can also be a file URL if the ISO is at a local path. By default the - VirtualBox builder will go and download the proper guest additions ISO from - the internet. 
+- `guest_additions_url` (string) - The URL to the guest additions ISO + to upload. This can also be a file URL if the ISO is at a local path. By + default the VirtualBox builder will go and download the proper guest + additions ISO from the internet. -- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines - by launching a GUI that shows the console of the machine being built. When - this value is set to true, the machine will start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual + machines by launching a GUI that shows the console of the machine + being built. When this value is set to true, the machine will start without + a console. -- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. 
By - default the values are 8000 and 9000, respectively. +- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `import_flags` (array of strings) - Additional flags to pass to - `VBoxManage import`. This can be used to add additional command-line flags - such as `--eula-accept` to accept a EULA in the OVF. +- `import_flags` (array of strings) - Additional flags to pass to + `VBoxManage import`. This can be used to add additional command-line flags + such as `--eula-accept` to accept a EULA in the OVF. -- `import_opts` (string) - Additional options to pass to the - `VBoxManage import`. This can be useful for passing "keepallmacs" or - "keepnatmacs" options for existing ovf images. +- `import_opts` (string) - Additional options to pass to the + `VBoxManage import`. This can be useful for passing "keepallmacs" or + "keepnatmacs" options for existing ovf images. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. +- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. 
By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine unless a shutdown - command takes place inside script so this may safely be omitted. If one or - more scripts require a reboot it is suggested to leave this blank since - reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine unless a + shutdown command takes place inside script so this may safely be omitted. If + one or more scripts require a reboot it is suggested to leave this blank + since reboots may fail and specify the final shutdown command in your + last script. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded to - the SSH port on the guest machine. Because Packer often runs in parallel, - Packer will choose a randomly available port in this range to use as the - host port. 
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded + to the SSH port on the guest machine. Because Packer often runs in parallel, + Packer will choose a randomly available port in this range to use as the + host port. -- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer - does not setup forwarded port mapping for SSH requests and uses `ssh_port` on - the host to communicate to the virtual machine +- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer + does not setup forwarded port mapping for SSH requests and uses `ssh_port` + on the host to communicate to the virtual machine -- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to - execute in order to further customize the virtual machine being created. The - value of this is an array of commands to execute. The commands are executed in - the order defined in the template. For each command, the command is defined - itself as an array of strings, where each string represents a single argument - on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each - arg is treated as a [configuration - template](/docs/templates/configuration-templates.html), where the `Name` - variable is replaced with the VM name. More details on how to use `VBoxManage` - are below. +- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to + execute in order to further customize the virtual machine being created. The + value of this is an array of commands to execute. The commands are executed + in the order defined in the template. For each command, the command is + defined itself as an array of strings, where each string represents a single + argument on the command-line to `VBoxManage` (but excluding + `VBoxManage` itself). 
Each arg is treated as a [configuration + template](/docs/templates/configuration-templates.html), where the `Name` + variable is replaced with the VM name. More details on how to use + `VBoxManage` are below. -- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. -- `virtualbox_version_file` (string) - The path within the virtual machine to - upload a file that contains the VirtualBox version that was used to create - the machine. This information can be useful for provisioning. By default this - is ".vbox\_version", which will generally be upload it into the - home directory. +- `virtualbox_version_file` (string) - The path within the virtual machine to - upload a file that contains the VirtualBox version that was used to create + the machine. This information can be useful for provisioning. By default + this is ".vbox\_version", which will generally upload it into the + home directory. -- `vm_name` (string) - This is the name of the virtual machine when it is - imported as well as the name of the OVF file when the virtual machine - is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the - name of the build. +- `vm_name` (string) - This is the name of the virtual machine when it is + imported as well as the name of the OVF file when the virtual machine + is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the + name of the build.
## Guest Additions diff --git a/website/source/docs/builders/virtualbox.html.markdown b/website/source/docs/builders/virtualbox.html.markdown index f96d37515..b2064f7d2 100644 --- a/website/source/docs/builders/virtualbox.html.markdown +++ b/website/source/docs/builders/virtualbox.html.markdown @@ -16,13 +16,14 @@ Packer actually comes with multiple builders able to create VirtualBox machines, depending on the strategy you want to use to build the image. Packer supports the following VirtualBox builders: -- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO - file, creates a brand new VirtualBox VM, installs an OS, provisions software - within the OS, then exports that machine to create an image. This is best for - people who want to start from scratch. +- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO + file, creates a brand new VirtualBox VM, installs an OS, provisions software + within the OS, then exports that machine to create an image. This is best + for people who want to start from scratch. -- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports an - existing OVF/OVA file, runs provisioners on top of that VM, and exports that - machine to create an image. This is best if you have an existing VirtualBox VM - export you want to use as the source. As an additional benefit, you can feed - the artifact of this builder back into itself to iterate on a machine. +- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports + an existing OVF/OVA file, runs provisioners on top of that VM, and exports + that machine to create an image. This is best if you have an existing + VirtualBox VM export you want to use as the source. As an additional + benefit, you can feed the artifact of this builder back into itself to + iterate on a machine. 
diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index ad2ac5c33..fba47f0ad 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -57,195 +57,199 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. -- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or - "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files are generally large and corruption does happen - from time to time. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or + "sha512" currently. While "none" will skip checksumming, this is not + recommended since ISO files are generally large and corruption does happen + from time to time. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). If this is an - HTTP URL, Packer will download it and cache it between runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). If + this is an HTTP URL, Packer will download it and cache it between runs. 
-- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `disk_additional_size` (array of integers) - The size(s) of any additional - hard disks for the VM in megabytes. If this is not specified then the VM will - only contain a primary hard disk. The builder uses expandable, not fixed-size - virtual hard disks, so the actual file representing the disk will not use the - full size unless it is full. +- `disk_additional_size` (array of integers) - The size(s) of any additional + hard disks for the VM in megabytes. If this is not specified then the VM + will only contain a primary hard disk. The builder uses expandable, not + fixed-size virtual hard disks, so the actual file representing the disk will + not use the full size unless it is full. -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. 
If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. The - builder uses expandable, not fixed-size virtual hard disks, so the actual file - representing the disk will not use the full size unless it is full. By default - this is set to 40,000 (about 40 GB). +- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. + The builder uses expandable, not fixed-size virtual hard disks, so the + actual file representing the disk will not use the full size unless it + is full. By default this is set to 40,000 (about 40 GB). -- `disk_type_id` (string) - The type of VMware virtual disk to create. The - default is "1", which corresponds to a growable virtual disk split in - 2GB files. This option is for advanced usage, modify only if you know what - you're doing. For more information, please consult the [Virtual Disk Manager - User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop - VMware clients. For ESXi, refer to the proper ESXi documentation. +- `disk_type_id` (string) - The type of VMware virtual disk to create. The + default is "1", which corresponds to a growable virtual disk split in + 2GB files. This option is for advanced usage, modify only if you know what + you're doing. For more information, please consult the [Virtual Disk Manager + User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop + VMware clients. For ESXi, refer to the proper ESXi documentation. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. 
This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is - "/Applications/VMware Fusion.app" but this setting allows you to - customize this. +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to + customize this. -- `guest_os_type` (string) - The guest OS type being installed. This will be set - in the VMware VMX. By default this is "other". By specifying a more specific - OS type, VMware may perform some optimizations or virtual hardware changes to - better support the operating system running in the virtual machine. +- `guest_os_type` (string) - The guest OS type being installed. This will be + set in the VMware VMX. By default this is "other". 
By specifying a more + specific OS type, VMware may perform some optimizations or virtual hardware + changes to better support the operating system running in the + virtual machine. -- `headless` (boolean) - Packer defaults to building VMware virtual machines by - launching a GUI that shows the console of the machine being built. When this - value is set to true, the machine will start without a console. For VMware - machines, Packer will output VNC connection information in case you need to - connect to the console to debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. For + VMware machines, Packer will output VNC connection information in case you + need to connect to the console to debug the build process. -- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. 
- Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. By - default the values are 8000 and 9000, respectively. +- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer - will try these in order. If anything goes wrong attempting to download or - while downloading a single URL, it will move on to the next. All URLs must - point to the same file (same checksum). By default this is empty and `iso_url` - is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. + Packer will try these in order. If anything goes wrong attempting to + download or while downloading a single URL, it will move on to the next. All + URLs must point to the same file (same checksum). By default this is empty + and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. 
+- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `remote_cache_datastore` (string) - The path to the datastore where supporting - files will be stored during the build on the remote machine. By default this - is the same as the `remote_datastore` option. This only has an effect if - `remote_type` is enabled. +- `remote_cache_datastore` (string) - The path to the datastore where + supporting files will be stored during the build on the remote machine. By + default this is the same as the `remote_datastore` option. This only has an + effect if `remote_type` is enabled. -- `remote_cache_directory` (string) - The path where the ISO and/or floppy files - will be stored during the build on the remote machine. The path is relative to - the `remote_cache_datastore` on the remote machine. By default this - is "packer\_cache". This only has an effect if `remote_type` is enabled. +- `remote_cache_directory` (string) - The path where the ISO and/or floppy + files will be stored during the build on the remote machine. The path is + relative to the `remote_cache_datastore` on the remote machine. By default + this is "packer\_cache". This only has an effect if `remote_type` + is enabled. -- `remote_datastore` (string) - The path to the datastore where the resulting VM - will be stored when it is built on the remote machine. By default this - is "datastore1". This only has an effect if `remote_type` is enabled. +- `remote_datastore` (string) - The path to the datastore where the resulting + VM will be stored when it is built on the remote machine. By default this + is "datastore1". 
This only has an effect if `remote_type` is enabled. -- `remote_host` (string) - The host of the remote machine used for access. This - is only required if `remote_type` is enabled. +- `remote_host` (string) - The host of the remote machine used for access. + This is only required if `remote_type` is enabled. -- `remote_password` (string) - The SSH password for the user used to access the - remote machine. By default this is empty. This only has an effect if - `remote_type` is enabled. +- `remote_password` (string) - The SSH password for the user used to access + the remote machine. By default this is empty. This only has an effect if + `remote_type` is enabled. -- `remote_type` (string) - The type of remote machine that will be used to build - this VM rather than a local desktop product. The only value accepted for this - currently is "esx5". If this is not set, a desktop product will be used. By - default, this is not set. +- `remote_type` (string) - The type of remote machine that will be used to + build this VM rather than a local desktop product. The only value accepted + for this currently is "esx5". If this is not set, a desktop product will + be used. By default, this is not set. -- `remote_username` (string) - The username for the SSH user that will access - the remote machine. This is required if `remote_type` is enabled. +- `remote_username` (string) - The username for the SSH user that will access + the remote machine. This is required if `remote_type` is enabled. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine. 
-- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `skip_compaction` (boolean) - VMware-created disks are defragmented and - compacted at the end of the build process using `vmware-vdiskmanager`. In - certain rare cases, this might actually end up making the resulting disks - slightly larger. If you find this to be the case, you can disable compaction - using this configuration value. +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks + slightly larger. If you find this to be the case, you can disable compaction + using this configuration value. -- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload - into the VM. Valid values are "darwin", "linux", and "windows". By default, - this is empty, which means VMware tools won't be uploaded. +- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to + upload into the VM. Valid values are "darwin", "linux", and "windows". By + default, this is empty, which means VMware tools won't be uploaded. -- `tools_upload_path` (string) - The path in the VM to upload the VMware tools. - This only takes effect if `tools_upload_flavor` is non-empty. This is a - [configuration template](/docs/templates/configuration-templates.html) that - has a single valid variable: `Flavor`, which will be the value of - `tools_upload_flavor`. 
By default the upload path is set to `{{.Flavor}}.iso`. - This setting is not used when `remote_type` is "esx5". +- `tools_upload_path` (string) - The path in the VM to upload the + VMware tools. This only takes effect if `tools_upload_flavor` is non-empty. + This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `tools_upload_flavor`. + By default the upload path is set to `{{.Flavor}}.iso`. This setting is not + used when `remote_type` is "esx5". -- `version` (string) - The [vmx hardware - version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) - for the new virtual machine. Only the default value has been tested, any other - value is experimental. Default value is '9'. +- `version` (string) - The [vmx hardware + version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) + for the new virtual machine. Only the default value has been tested, any + other value is experimental. Default value is '9'. -- `vm_name` (string) - This is the name of the VMX file for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the VMX file for the new virtual + machine, without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. -- `vmdk_name` (string) - The filename of the virtual disk that'll be created, - without the extension. This defaults to "packer". +- `vmdk_name` (string) - The filename of the virtual disk that'll be created, + without the extension. This defaults to "packer". -- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into - the virtual machine VMX file. This is for advanced users who want to set - properties such as memory, CPU, etc. 
+- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter + into the virtual machine VMX file. This is for advanced users who want to + set properties such as memory, CPU, etc. -- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. -- `vmx_template_path` (string) - Path to a [configuration - template](/docs/templates/configuration-templates.html) that defines the - contents of the virtual machine VMX file for VMware. This is for **advanced - users only** as this can render the virtual machine non-functional. See below - for more information. For basic VMX modifications, try `vmx_data` first. +- `vmx_template_path` (string) - Path to a [configuration + template](/docs/templates/configuration-templates.html) that defines the + contents of the virtual machine VMX file for VMware. This is for **advanced + users only** as this can render the virtual machine non-functional. See + below for more information. For basic VMX modifications, try + `vmx_data` first. -- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to - use for VNC access to the virtual machine. The builder uses VNC to type the - initial `boot_command`. Because Packer generally runs in parallel, Packer uses - a randomly chosen port in this range that appears available. By default this - is 5900 to 6000. The minimum and maximum ports are inclusive. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port + to use for VNC access to the virtual machine. The builder uses VNC to type + the initial `boot_command`. Because Packer generally runs in parallel, + Packer uses a randomly chosen port in this range that appears available. 
By + default this is 5900 to 6000. The minimum and maximum ports are inclusive. ## Boot Command @@ -263,40 +267,40 @@ machine, simulating a human actually typing the keyboard. There are a set of special keys available. If these are in your boot command, they will be replaced by the proper key: -- `` - Backspace +- `` - Backspace -- `` - Delete +- `` - Delete -- `` and `` - Simulates an actual "enter" or "return" keypress. +- `` and `` - Simulates an actual "enter" or "return" keypress. -- `` - Simulates pressing the escape key. +- `` - Simulates pressing the escape key. -- `` - Simulates pressing the tab key. +- `` - Simulates pressing the tab key. -- `` - `` - Simulates pressing a function key. +- `` - `` - Simulates pressing a function key. -- `` `` `` `` - Simulates pressing an arrow key. +- `` `` `` `` - Simulates pressing an arrow key. -- `` - Simulates pressing the spacebar. +- `` - Simulates pressing the spacebar. -- `` - Simulates pressing the insert key. +- `` - Simulates pressing the insert key. -- `` `` - Simulates pressing the home and end keys. +- `` `` - Simulates pressing the home and end keys. -- `` `` - Simulates pressing the page up and page down keys. +- `` `` - Simulates pressing the page up and page down keys. -- `` `` `` - Adds a 1, 5 or 10 second pause before sending - any additional keys. This is useful if you have to generally wait for the UI - to update before typing more. +- `` `` `` - Adds a 1, 5 or 10 second pause before + sending any additional keys. This is useful if you have to generally wait + for the UI to update before typing more. In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are: -- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that - is started serving the directory specified by the `http_directory` - configuration parameter. 
If `http_directory` isn't specified, these will be - blank! +- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server + that is started serving the directory specified by the `http_directory` + configuration parameter. If `http_directory` isn't specified, these will be + blank! Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: @@ -332,12 +336,12 @@ Within the template, a handful of variables are available so that your template can continue working with the rest of the Packer machinery. Using these variables isn't required, however. -- `Name` - The name of the virtual machine. -- `GuestOS` - The VMware-valid guest OS type. -- `DiskName` - The filename (without the suffix) of the main virtual disk. -- `ISOPath` - The path to the ISO to use for the OS installation. -- `Version` - The Hardware version VMWare will execute this vm under. Also known - as the `virtualhw.version`. +- `Name` - The name of the virtual machine. +- `GuestOS` - The VMware-valid guest OS type. +- `DiskName` - The filename (without the suffix) of the main virtual disk. +- `ISOPath` - The path to the ISO to use for the OS installation. +- `Version` - The Hardware version VMWare will execute this vm under. Also + known as the `virtualhw.version`. ## Building on a Remote vSphere Hypervisor @@ -367,23 +371,23 @@ connections. To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in the required `remote_*` configurations: -- `remote_type` - This must be set to "esx5". +- `remote_type` - This must be set to "esx5". -- `remote_host` - The host of the remote machine. +- `remote_host` - The host of the remote machine. Additionally, there are some optional configurations that you'll likely have to modify as well: -- `remote_datastore` - The path to the datastore where the VM will be stored on - the ESXi machine. +- `remote_datastore` - The path to the datastore where the VM will be stored + on the ESXi machine. 
-- `remote_cache_datastore` - The path to the datastore where supporting files - will be stored during the build on the remote machine. +- `remote_cache_datastore` - The path to the datastore where supporting files + will be stored during the build on the remote machine. -- `remote_cache_directory` - The path where the ISO and/or floppy files will be - stored during the build on the remote machine. The path is relative to the - `remote_cache_datastore` on the remote machine. +- `remote_cache_directory` - The path where the ISO and/or floppy files will + be stored during the build on the remote machine. The path is relative to + the `remote_cache_datastore` on the remote machine. -- `remote_username` - The SSH username used to access the remote machine. +- `remote_username` - The SSH username used to access the remote machine. -- `remote_password` - The SSH password for access to the remote machine. +- `remote_password` - The SSH password for access to the remote machine. diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown index bd1afb83c..da3a418b3 100644 --- a/website/source/docs/builders/vmware-vmx.html.markdown +++ b/website/source/docs/builders/vmware-vmx.html.markdown @@ -53,99 +53,100 @@ builder. ### Required: -- `source_path` (string) - Path to the source VMX file to clone. +- `source_path` (string) - Path to the source VMX file to clone. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. 
If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. 
This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is - "/Applications/VMware Fusion.app" but this setting allows you to - customize this. +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to + customize this. -- `headless` (boolean) - Packer defaults to building VMware virtual machines by - launching a GUI that shows the console of the machine being built. When this - value is set to true, the machine will start without a console. For VMware - machines, Packer will output VNC connection information in case you need to - connect to the console to debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. For + VMware machines, Packer will output VNC connection information in case you + need to connect to the console to debug the build process. -- `http_directory` (string) - Path to a directory to serve using an HTTP server. - The files in this directory will be available over HTTP that will be - requestable from the virtual machine. This is useful for hosting kickstart - files and so on. By default this is "", which means no HTTP server will - be started. 
The address and port of the HTTP server will be available as - variables in `boot_command`. This is covered in more detail below. +- `http_directory` (string) - Path to a directory to serve using an + HTTP server. The files in this directory will be available over HTTP that + will be requestable from the virtual machine. This is useful for hosting + kickstart files and so on. By default this is "", which means no HTTP server + will be started. The address and port of the HTTP server will be available + as variables in `boot_command`. This is covered in more detail below. -- `http_port_min` and `http_port_max` (integer) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. By - default the values are 8000 and 9000, respectively. +- `http_port_min` and `http_port_max` (integer) - These are the minimum and + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will choose + a randomly available port in this range to run the HTTP server. If you want + to force the HTTP server to be on one port, make this minimum and maximum + port the same. By default the values are 8000 and 9000, respectively. -- `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running - the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the - name of the build. +- `output_directory` (string) - This is the path to the directory where the + resulting virtual machine will be created. 
This may be relative or absolute. + If relative, the path is relative to the working directory when `packer` + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine unless a shutdown - command takes place inside script so this may safely be omitted. If one or - more scripts require a reboot it is suggested to leave this blank since - reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine unless a + shutdown command takes place inside script so this may safely be omitted. If + one or more scripts require a reboot it is suggested to leave this blank + since reboots may fail and specify the final shutdown command in your + last script. -- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `skip_compaction` (boolean) - VMware-created disks are defragmented and - compacted at the end of the build process using `vmware-vdiskmanager`. In - certain rare cases, this might actually end up making the resulting disks - slightly larger. 
If you find this to be the case, you can disable compaction - using this configuration value. +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks + slightly larger. If you find this to be the case, you can disable compaction + using this configuration value. -- `vm_name` (string) - This is the name of the VMX file for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the VMX file for the new virtual + machine, without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. -- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into - the virtual machine VMX file. This is for advanced users who want to set - properties such as memory, CPU, etc. +- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter + into the virtual machine VMX file. This is for advanced users who want to + set properties such as memory, CPU, etc. -- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. -- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to - use for VNC access to the virtual machine. The builder uses VNC to type the - initial `boot_command`. Because Packer generally runs in parallel, Packer uses - a randomly chosen port in this range that appears available. By default this - is 5900 to 6000. The minimum and maximum ports are inclusive. 
+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port + to use for VNC access to the virtual machine. The builder uses VNC to type + the initial `boot_command`. Because Packer generally runs in parallel, + Packer uses a randomly chosen port in this range that appears available. By + default this is 5900 to 6000. The minimum and maximum ports are inclusive. diff --git a/website/source/docs/builders/vmware.html.markdown b/website/source/docs/builders/vmware.html.markdown index e77fe574a..e8486ca4c 100644 --- a/website/source/docs/builders/vmware.html.markdown +++ b/website/source/docs/builders/vmware.html.markdown @@ -15,14 +15,14 @@ Packer actually comes with multiple builders able to create VMware machines, depending on the strategy you want to use to build the image. Packer supports the following VMware builders: -- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file, - creates a brand new VMware VM, installs an OS, provisions software within the - OS, then exports that machine to create an image. This is best for people who - want to start from scratch. +- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file, + creates a brand new VMware VM, installs an OS, provisions software within + the OS, then exports that machine to create an image. This is best for + people who want to start from scratch. -- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an - existing VMware machine (from a VMX file), runs provisioners on top of that - VM, and exports that machine to create an image. This is best if you have an - existing VMware VM you want to use as the source. As an additional benefit, - you can feed the artifact of this builder back into Packer to iterate on - a machine. +- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an + existing VMware machine (from a VMX file), runs provisioners on top of that + VM, and exports that machine to create an image. 
This is best if you have an + existing VMware VM you want to use as the source. As an additional benefit, + you can feed the artifact of this builder back into Packer to iterate on + a machine. diff --git a/website/source/docs/command-line/build.html.markdown b/website/source/docs/command-line/build.html.markdown index 92afda570..ba4421293 100644 --- a/website/source/docs/command-line/build.html.markdown +++ b/website/source/docs/command-line/build.html.markdown @@ -17,24 +17,26 @@ artifacts that are created will be outputted at the end of the build. ## Options -- `-color=false` - Disables colorized output. Enabled by default. +- `-color=false` - Disables colorized output. Enabled by default. -- `-debug` - Disables parallelization and enables debug mode. Debug mode flags - the builders that they should output debugging information. The exact behavior - of debug mode is left to the builder. In general, builders usually will stop - between each step, waiting for keyboard input before continuing. This will - allow the user to inspect state and so on. +- `-debug` - Disables parallelization and enables debug mode. Debug mode flags + the builders that they should output debugging information. The exact + behavior of debug mode is left to the builder. In general, builders usually + will stop between each step, waiting for keyboard input before continuing. + This will allow the user to inspect state and so on. -- `-except=foo,bar,baz` - Builds all the builds except those with the given - comma-separated names. Build names by default are the names of their builders, - unless a specific `name` attribute is specified within the configuration. +- `-except=foo,bar,baz` - Builds all the builds except those with the given + comma-separated names. Build names by default are the names of their + builders, unless a specific `name` attribute is specified within + the configuration. 
-- `-force` - Forces a builder to run when artifacts from a previous build - prevent a build from running. The exact behavior of a forced build is left to - the builder. In general, a builder supporting the forced build will remove the - artifacts from the previous build. This will allow the user to repeat a build - without having to manually clean these artifacts beforehand. +- `-force` - Forces a builder to run when artifacts from a previous build + prevent a build from running. The exact behavior of a forced build is left + to the builder. In general, a builder supporting the forced build will + remove the artifacts from the previous build. This will allow the user to + repeat a build without having to manually clean these artifacts beforehand. -- `-only=foo,bar,baz` - Only build the builds with the given - comma-separated names. Build names by default are the names of their builders, - unless a specific `name` attribute is specified within the configuration. +- `-only=foo,bar,baz` - Only build the builds with the given + comma-separated names. Build names by default are the names of their + builders, unless a specific `name` attribute is specified within + the configuration. diff --git a/website/source/docs/command-line/fix.html.markdown b/website/source/docs/command-line/fix.html.markdown index eb383fec6..ec18b69bc 100644 --- a/website/source/docs/command-line/fix.html.markdown +++ b/website/source/docs/command-line/fix.html.markdown @@ -19,7 +19,7 @@ The fix command will output the changed template to standard out, so you should redirect standard using standard OS-specific techniques if you want to save it to a file. For example, on Linux systems, you may want to do this: - $ packer fix old.json > new.json +\$ packer fix old.json > new.json If fixing fails for any reason, the fix command will exit with a non-zero exit status. 
Error messages appear on standard error, so if you're redirecting diff --git a/website/source/docs/command-line/machine-readable.html.markdown b/website/source/docs/command-line/machine-readable.html.markdown index 550a14f35..fa9fe3cac 100644 --- a/website/source/docs/command-line/machine-readable.html.markdown +++ b/website/source/docs/command-line/machine-readable.html.markdown @@ -53,20 +53,22 @@ timestamp,target,type,data... Each component is explained below: -- **timestamp** is a Unix timestamp in UTC of when the message was printed. +- **timestamp** is a Unix timestamp in UTC of when the message was printed. -- **target** is the target of the following output. This is empty if the message - is related to Packer globally. Otherwise, this is generally a build name so - you can relate output to a specific build while parallel builds are running. +- **target** is the target of the following output. This is empty if the + message is related to Packer globally. Otherwise, this is generally a build + name so you can relate output to a specific build while parallel builds + are running. -- **type** is the type of machine-readable message being outputted. There are a - set of standard types which are covered later, but each component of Packer - (builders, provisioners, etc.) may output their own custom types as well, - allowing the machine-readable output to be infinitely flexible. +- **type** is the type of machine-readable message being outputted. There are + a set of standard types which are covered later, but each component of + Packer (builders, provisioners, etc.) may output their own custom types as + well, allowing the machine-readable output to be infinitely flexible. -- **data** is zero or more comma-seperated values associated with the - prior type. The exact amount and meaning of this data is type-dependent, so - you must read the documentation associated with the type to understand fully. 
+- **data** is zero or more comma-seperated values associated with the + prior type. The exact amount and meaning of this data is type-dependent, so + you must read the documentation associated with the type to + understand fully. Within the format, if data contains a comma, it is replaced with `%!(PACKER_COMMA)`. This was preferred over an escape character such as `\'` diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 0cc9699f5..764333967 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -26,16 +26,16 @@ configuration](/docs/templates/push.html) must be completed within the template. ## Options -- `-message` - A message to identify the purpose or changes in this Packer - template much like a VCS commit message. This message will be passed to the - Packer build service. This option is also available as a short option `-m`. +- `-message` - A message to identify the purpose or changes in this Packer + template much like a VCS commit message. This message will be passed to the + Packer build service. This option is also available as a short option `-m`. -- `-token` - An access token for authenticating the push to the Packer build - service such as Atlas. This can also be specified within the push - configuration in the template. +- `-token` - An access token for authenticating the push to the Packer build + service such as Atlas. This can also be specified within the push + configuration in the template. -- `-name` - The name of the build in the service. This typically looks like - `hashicorp/precise64`. +- `-name` - The name of the build in the service. This typically looks like + `hashicorp/precise64`. 
## Examples diff --git a/website/source/docs/command-line/validate.html.markdown b/website/source/docs/command-line/validate.html.markdown index e17f23dc4..c49e6587d 100644 --- a/website/source/docs/command-line/validate.html.markdown +++ b/website/source/docs/command-line/validate.html.markdown @@ -29,5 +29,5 @@ Errors validating build 'vmware'. 1 error(s) occurred: ## Options -- `-syntax-only` - Only the syntax of the template is checked. The configuration - is not validated. +- `-syntax-only` - Only the syntax of the template is checked. The + configuration is not validated. diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown index 0d86df3d2..8af8a241d 100644 --- a/website/source/docs/extend/developing-plugins.html.markdown +++ b/website/source/docs/extend/developing-plugins.html.markdown @@ -52,19 +52,19 @@ the following two packages, you're encouraged to use whatever packages you want. Because plugins are their own processes, there is no danger of colliding dependencies. -- `github.com/mitchellh/packer` - Contains all the interfaces that you have to - implement for any given plugin. +- `github.com/mitchellh/packer` - Contains all the interfaces that you have to + implement for any given plugin. -- `github.com/mitchellh/packer/plugin` - Contains the code to serve the plugin. - This handles all the inter-process communication stuff. +- `github.com/mitchellh/packer/plugin` - Contains the code to serve + the plugin. This handles all the inter-process communication stuff. There are two steps involved in creating a plugin: -1. Implement the desired interface. For example, if you're building a builder - plugin, implement the `packer.Builder` interface. +1. Implement the desired interface. For example, if you're building a builder + plugin, implement the `packer.Builder` interface. -2. Serve the interface by calling the appropriate plugin serving method in your - main method. 
In the case of a builder, this is `plugin.ServeBuilder`. +2. Serve the interface by calling the appropriate plugin serving method in your + main method. In the case of a builder, this is `plugin.ServeBuilder`. A basic example is shown below. In this example, assume the `Builder` struct implements the `packer.Builder` interface: diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown index 98249de5d..9f18ca138 100644 --- a/website/source/docs/extend/plugins.html.markdown +++ b/website/source/docs/extend/plugins.html.markdown @@ -51,21 +51,21 @@ Once the plugin is named properly, Packer automatically discovers plugins in the following directories in the given order. If a conflicting plugin is found later, it will take precedence over one found earlier. -1. The directory where `packer` is, or the executable directory. +1. The directory where `packer` is, or the executable directory. -2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` - on Windows. +2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` + on Windows. -3. The current working directory. +3. The current working directory. The valid types for plugins are: -- `builder` - Plugins responsible for building images for a specific platform. +- `builder` - Plugins responsible for building images for a specific platform. -- `command` - A CLI sub-command for `packer`. +- `command` - A CLI sub-command for `packer`. -- `post-processor` - A post-processor responsible for taking an artifact from a - builder and turning it into something else. +- `post-processor` - A post-processor responsible for taking an artifact from + a builder and turning it into something else. -- `provisioner` - A provisioner to install software on images created by - a builder. +- `provisioner` - A provisioner to install software on images created by + a builder. 
diff --git a/website/source/docs/extend/post-processor.html.markdown b/website/source/docs/extend/post-processor.html.markdown index 1120bc31d..9067e19d8 100644 --- a/website/source/docs/extend/post-processor.html.markdown +++ b/website/source/docs/extend/post-processor.html.markdown @@ -79,11 +79,11 @@ creating a new artifact with a single file: the compressed archive. The result signature of this method is `(Artifact, bool, error)`. Each return value is explained below: -- `Artifact` - The newly created artifact if no errors occurred. -- `bool` - If true, the input artifact will forcefully be kept. By default, - Packer typically deletes all input artifacts, since the user doesn't generally - want intermediary artifacts. However, some post-processors depend on the - previous artifact existing. If this is `true`, it forces packer to keep the - artifact around. -- `error` - Non-nil if there was an error in any way. If this is the case, the - other two return values are ignored. +- `Artifact` - The newly created artifact if no errors occurred. +- `bool` - If true, the input artifact will forcefully be kept. By default, + Packer typically deletes all input artifacts, since the user doesn't + generally want intermediary artifacts. However, some post-processors depend + on the previous artifact existing. If this is `true`, it forces packer to + keep the artifact around. +- `error` - Non-nil if there was an error in any way. If this is the case, the + other two return values are ignored. diff --git a/website/source/docs/machine-readable/command-build.html.markdown b/website/source/docs/machine-readable/command-build.html.markdown index 7472b7bfc..7b7b27993 100644 --- a/website/source/docs/machine-readable/command-build.html.markdown +++ b/website/source/docs/machine-readable/command-build.html.markdown @@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of `packer build`.
    -
    artifact (>= 2)
    -
    +
    +artifact (>= 2) +
    +

    Information about an artifact of the targeted item. This is a fairly complex (but uniform!) machine-readable type that contains @@ -37,10 +39,12 @@ These are the machine-readable types that exist as part of the output of data points related to the subtype. The exact count and meaning of this subtypes comes from the subtype documentation.

    -
    -
    artifact-count (1)
    -
    +
    +
    +artifact-count (1) +
    +

    The number of artifacts associated with the given target. This will always be outputted _before_ any other artifact information, @@ -51,10 +55,12 @@ These are the machine-readable types that exist as part of the output of Data 1: count - The number of artifacts as a base 10 integer.

    -
    -
    artifact subtype: builder-id (1)
    -
    +
    +
    +artifact subtype: builder-id (1) +
    +

    The unique ID of the builder that created this artifact.

    @@ -62,19 +68,23 @@ These are the machine-readable types that exist as part of the output of

    Data 1: id - The unique ID of the builder.

    -
    -
    artifact subtype: end (0)
    -
    +
    +
    +artifact subtype: end (0) +
    +

    The last machine-readable output line outputted for an artifact. This is a sentinel value so you know that no more data related to the targeted artifact will be outputted.

    -
    -
    artifact subtype: file (2)
    -
    +
    +
    +artifact subtype: file (2) +
    +

    A single file associated with the artifact. There are 0 to "files-count" of these entries to describe every file that is @@ -89,10 +99,12 @@ These are the machine-readable types that exist as part of the output of

    Data 2: filename - The filename.

    -
    -
    artifact subtype: files-count (1)
    -
    +
    +
    +artifact subtype: files-count (1) +
    +

    The number of files associated with this artifact. Not all artifacts have files associated with it. @@ -101,10 +113,12 @@ These are the machine-readable types that exist as part of the output of

    Data 1: count - The number of files.

    -
    -
    artifact subtype: id (1)
    -
    +
    +
    +artifact subtype: id (1) +
    +

    The ID (if any) of the artifact that was built. Not all artifacts have associated IDs. For example, AMIs built have IDs associated @@ -115,18 +129,22 @@ These are the machine-readable types that exist as part of the output of

    Data 1: id - The ID of the artifact.

    -
    -
    artifact subtype: nil (0)
    -
    +
    +
    +artifact subtype: nil (0) +
    +

    If present, this means that the artifact was nil, or that the targeted build completed successfully but no artifact was created.

    -
    -
    artifact subtype: string (1)
    -
    +
    +
    +artifact subtype: string (1) +
    +

    The human-readable string description of the artifact provided by the artifact itself. @@ -135,10 +153,12 @@ These are the machine-readable types that exist as part of the output of

    Data 1: string - The string output for the artifact.

    -
    -
    error-count (1)
    -
    +
    +
    +error-count (1) +
    +

    The number of errors that occurred during the build. This will always be outputted before any errors so you know how many are coming. @@ -148,10 +168,12 @@ These are the machine-readable types that exist as part of the output of Data 1: count - The number of build errors as a base 10 integer.

    -
    -
    error (1)
    -
    +
    +
    +error (1) +
    +

    A build error that occurred. The target of this output will be the build that had the error. @@ -160,6 +182,6 @@ These are the machine-readable types that exist as part of the output of

    Data 1: error - The error message as a string.

    -
    +
    diff --git a/website/source/docs/machine-readable/command-inspect.html.markdown b/website/source/docs/machine-readable/command-inspect.html.markdown index 4a5d68876..a75b892f3 100644 --- a/website/source/docs/machine-readable/command-inspect.html.markdown +++ b/website/source/docs/machine-readable/command-inspect.html.markdown @@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of `packer inspect`.
    -
    template-variable (3)
    -
    +
    +template-variable (3) +
    +

    A user variable defined within the template. @@ -32,10 +34,12 @@ These are the machine-readable types that exist as part of the output of Data 3: required - If non-zero, then this variable is required.

    -
    -
    template-builder (2)
    -
    +
    +
    +template-builder (2) +
    +

    A builder defined within the template.

    @@ -48,10 +52,12 @@ These are the machine-readable types that exist as part of the output of generally be the same as the name unless you explicitly override the name.

    -
    -
    template-provisioner (1)
    -
    +
    +
    +template-provisioner (1) +
    +

    A provisioner defined within the template. Multiple of these may exist. If so, they are outputted in the order they would run. @@ -60,6 +66,6 @@ These are the machine-readable types that exist as part of the output of

    Data 1: name - The name/type of the provisioner.

    -
    +
    diff --git a/website/source/docs/machine-readable/command-version.html.markdown b/website/source/docs/machine-readable/command-version.html.markdown index 8b32b2540..4d7be6d23 100644 --- a/website/source/docs/machine-readable/command-version.html.markdown +++ b/website/source/docs/machine-readable/command-version.html.markdown @@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of `packer version`.
    -
    version (1)
    -
    +
    +version (1) +
    +

    The version number of Packer running.

    @@ -21,19 +23,23 @@ These are the machine-readable types that exist as part of the output of only including the major, minor, and patch versions. Example: "0.2.4".

    -
    -
    version-commit (1)
    -
    +
    +
    +version-commit (1) +
    +

    The SHA1 of the Git commit that built this version of Packer.

    Data 1: commit SHA1 - The SHA1 of the commit.

    -
    -
    version-prerelease (1)
    -
    +
    +
    +version-prerelease (1) +
    +

    The prerelease tag (if any) for the running version of Packer. This can be "beta", "dev", "alpha", etc. If this is empty, you can assume @@ -44,6 +50,6 @@ These are the machine-readable types that exist as part of the output of Data 1: prerelease name - The name of the prerelease tag.

    -
    +
    diff --git a/website/source/docs/machine-readable/general.html.markdown b/website/source/docs/machine-readable/general.html.markdown index b29ae053f..721406d7a 100644 --- a/website/source/docs/machine-readable/general.html.markdown +++ b/website/source/docs/machine-readable/general.html.markdown @@ -12,8 +12,10 @@ These are the machine-readable types that can appear in almost any machine-readable output and are provided by Packer core itself.
    -
    ui (2)
    -
    +
    +ui (2) +
    +

    Specifies the output and type of output that would've normally gone to the console if Packer were running in human-readable @@ -28,6 +30,6 @@ machine-readable output and are provided by Packer core itself. Data 2: output - The UI message that would have been outputted.

    -
    +
    diff --git a/website/source/docs/machine-readable/index.html.markdown b/website/source/docs/machine-readable/index.html.markdown index 161bda001..cde344947 100644 --- a/website/source/docs/machine-readable/index.html.markdown +++ b/website/source/docs/machine-readable/index.html.markdown @@ -24,12 +24,14 @@ Within each section, the format of the documentation is the following:
    -
    type-name (data-count)
    -
    +
    +type-name (data-count) +
    +

    Description of the type.

    Data 1: name - Description.

    -
    +
    diff --git a/website/source/docs/other/core-configuration.html.markdown b/website/source/docs/other/core-configuration.html.markdown index db1f75ab7..a112801e8 100644 --- a/website/source/docs/other/core-configuration.html.markdown +++ b/website/source/docs/other/core-configuration.html.markdown @@ -32,13 +32,13 @@ The format of the configuration file is basic JSON. Below is the list of all available configuration parameters for the core configuration file. None of these are required, since all have sane defaults. -- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and - maximum ports that Packer uses for communication with plugins, since plugin - communication happens over TCP connections on your local host. By default - these are 10,000 and 25,000, respectively. Be sure to set a fairly wide range - here, since Packer can easily use over 25 ports on a single run. +- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum + and maximum ports that Packer uses for communication with plugins, since + plugin communication happens over TCP connections on your local host. By + default these are 10,000 and 25,000, respectively. Be sure to set a fairly + wide range here, since Packer can easily use over 25 ports on a single run. -- `builders`, `commands`, `post-processors`, and `provisioners` are objects that - are used to install plugins. The details of how exactly these are set is - covered in more detail in the [installing plugins documentation - page](/docs/extend/plugins.html). +- `builders`, `commands`, `post-processors`, and `provisioners` are objects + that are used to install plugins. The details of how exactly these are set + is covered in more detail in the [installing plugins documentation + page](/docs/extend/plugins.html). 
diff --git a/website/source/docs/other/environmental-variables.html.markdown b/website/source/docs/other/environmental-variables.html.markdown index 7d455c708..8827ea5d9 100644 --- a/website/source/docs/other/environmental-variables.html.markdown +++ b/website/source/docs/other/environmental-variables.html.markdown @@ -9,28 +9,28 @@ page_title: Environmental Variables for Packer Packer uses a variety of environmental variables. A listing and description of each can be found below: -- `PACKER_CACHE_DIR` - The location of the packer cache. +- `PACKER_CACHE_DIR` - The location of the packer cache. -- `PACKER_CONFIG` - The location of the core configuration file. The format of - the configuration file is basic JSON. See the [core configuration - page](/docs/other/core-configuration.html). +- `PACKER_CONFIG` - The location of the core configuration file. The format of + the configuration file is basic JSON. See the [core configuration + page](/docs/other/core-configuration.html). -- `PACKER_LOG` - Setting this to any value will enable the logger. See the - [debugging page](/docs/other/debugging.html). +- `PACKER_LOG` - Setting this to any value will enable the logger. See the + [debugging page](/docs/other/debugging.html). -- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be - set for any logging to occur. See the [debugging - page](/docs/other/debugging.html). +- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be + set for any logging to occur. See the [debugging + page](/docs/other/debugging.html). -- `PACKER_NO_COLOR` - Setting this to any value will disable color in - the terminal. +- `PACKER_NO_COLOR` - Setting this to any value will disable color in + the terminal. -- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for communication - with plugins, since plugin communication happens over TCP connections on your - local host. The default is 25,000. 
See the [core configuration - page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for + communication with plugins, since plugin communication happens over TCP + connections on your local host. The default is 25,000. See the [core + configuration page](/docs/other/core-configuration.html). -- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for communication - with plugins, since plugin communication happens over TCP connections on your - local host. The default is 10,000. See the [core configuration - page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for + communication with plugins, since plugin communication happens over TCP + connections on your local host. The default is 10,000. See the [core + configuration page](/docs/other/core-configuration.html). diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 18211c313..4f2cb3640 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -25,14 +25,14 @@ location in Atlas. Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI - builder](/docs/builders/amazon.html) -2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. - The `atlas` post-processor is configured with the name of the AMI, for example - `hashicorp/foobar`, to create the artifact in Atlas or update the version if - the artifact already exists -3. The new version is ready and available to be used in deployments with a tool - like [Terraform](https://terraform.io) +1. Packer builds an AMI with the [Amazon AMI + builder](/docs/builders/amazon.html) +2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. 
+ The `atlas` post-processor is configured with the name of the AMI, for + example `hashicorp/foobar`, to create the artifact in Atlas or update the + version if the artifact already exists +3. The new version is ready and available to be used in deployments with a tool + like [Terraform](https://terraform.io) ## Configuration @@ -40,32 +40,33 @@ The configuration allows you to specify and access the artifact in Atlas. ### Required: -- `token` (string) - Your access token for the Atlas API. This can be generated - on your [tokens page](https://atlas.hashicorp.com/settings/tokens). - Alternatively you can export your Atlas token as an environmental variable and - remove it from the configuration. +- `token` (string) - Your access token for the Atlas API. This can be + generated on your [tokens + page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can + export your Atlas token as an environmental variable and remove it from + the configuration. -- `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, - i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must - have access to the organization, hashicorp in this example, in order to add an - artifact to the organization in Atlas. +- `artifact` (string) - The shorthand tag for your artifact that maps to + Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. + You must have access to the organization, hashicorp in this example, in + order to add an artifact to the organization in Atlas. -- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will - always be `amazon.ami`. This field must be defined because Atlas can host - other artifact types, such as Vagrant boxes. +- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will + always be `amazon.ami`. This field must be defined because Atlas can host + other artifact types, such as Vagrant boxes. 
-> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas post-processor](/docs/post-processors/atlas.html). ### Optional: -- `atlas_url` (string) - Override the base URL for Atlas. This is useful if - you're using Atlas Enterprise in your own network. Defaults to - `https://atlas.hashicorp.com/api/v1`. +- `atlas_url` (string) - Override the base URL for Atlas. This is useful if + you're using Atlas Enterprise in your own network. Defaults to + `https://atlas.hashicorp.com/api/v1`. -- `metadata` (map) - Send metadata about the artifact. If the artifact type is - "vagrant.box", you must specify a "provider" metadata about what provider - to use. +- `metadata` (map) - Send metadata about the artifact. If the artifact type is + "vagrant.box", you must specify a "provider" metadata about what provider + to use. ### Example Configuration diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 716e4e866..ad78a9315 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -20,25 +20,25 @@ VMware or VirtualBox) and compresses the artifact into a single archive. You must specify the output filename. The archive format is derived from the filename. -- `output` (string) - The path to save the compressed archive. The archive - format is inferred from the filename. E.g. `.tar.gz` will be a - gzipped tarball. `.zip` will be a zip file. If the extension can't be detected - packer defaults to `.tar.gz` behavior but will not change the filename. +- `output` (string) - The path to save the compressed archive. The archive + format is inferred from the filename. E.g. `.tar.gz` will be a + gzipped tarball. `.zip` will be a zip file. If the extension can't be + detected packer defaults to `.tar.gz` behavior but will not change + the filename. 
- If you are executing multiple builders in parallel you should make sure - `output` is unique for each one. For example - `packer_{{.BuildName}}_{{.Provider}}.zip`. +If you are executing multiple builders in parallel you should make sure `output` +is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. ### Optional: If you want more control over how the archive is created you can specify the following settings: -- `compression_level` (integer) - Specify the compression level, for algorithms - that support it, from 1 through 9 inclusive. Typically higher compression - levels take longer but produce smaller files. Defaults to `6` +- `compression_level` (integer) - Specify the compression level, for + algorithms that support it, from 1 through 9 inclusive. Typically higher + compression levels take longer but produce smaller files. Defaults to `6` -- `keep_input_artifact` (boolean) - Keep source files; defaults to `false` +- `keep_input_artifact` (boolean) - Keep source files; defaults to `false` ### Supported Formats diff --git a/website/source/docs/post-processors/docker-import.html.markdown b/website/source/docs/post-processors/docker-import.html.markdown index 0c3855622..968705f4b 100644 --- a/website/source/docs/post-processors/docker-import.html.markdown +++ b/website/source/docs/post-processors/docker-import.html.markdown @@ -24,9 +24,9 @@ registry. The configuration for this post-processor is extremely simple. At least a repository is required. -- `repository` (string) - The repository of the imported image. +- `repository` (string) - The repository of the imported image. -- `tag` (string) - The tag for the imported image. By default this is not set. +- `tag` (string) - The tag for the imported image. By default this is not set. 
## Example diff --git a/website/source/docs/post-processors/docker-push.html.markdown b/website/source/docs/post-processors/docker-push.html.markdown index 72793b735..9657e27b7 100644 --- a/website/source/docs/post-processors/docker-push.html.markdown +++ b/website/source/docs/post-processors/docker-push.html.markdown @@ -18,16 +18,16 @@ pushes it to a Docker registry. This post-processor has only optional configuration: -- `login` (boolean) - Defaults to false. If true, the post-processor will login - prior to pushing. +- `login` (boolean) - Defaults to false. If true, the post-processor will + login prior to pushing. -- `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use to authenticate to login. -- `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use to authenticate to login. -- `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use to authenticate to login. -- `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to login to. -> **Note:** If you login using the credentials above, the post-processor will automatically log you out afterwards (just the server specified). diff --git a/website/source/docs/post-processors/docker-save.html.markdown b/website/source/docs/post-processors/docker-save.html.markdown index 8f758755c..27b9b7533 100644 --- a/website/source/docs/post-processors/docker-save.html.markdown +++ b/website/source/docs/post-processors/docker-save.html.markdown @@ -25,7 +25,7 @@ familiar with this and vice versa. The configuration for this post-processor is extremely simple. -- `path` (string) - The path to save the image. +- `path` (string) - The path to save the image. 
## Example diff --git a/website/source/docs/post-processors/docker-tag.html.markdown b/website/source/docs/post-processors/docker-tag.html.markdown index 42c480676..ea9fccad1 100644 --- a/website/source/docs/post-processors/docker-tag.html.markdown +++ b/website/source/docs/post-processors/docker-tag.html.markdown @@ -27,12 +27,12 @@ that this works with committed resources, rather than exported. The configuration for this post-processor is extremely simple. At least a repository is required. -- `repository` (string) - The repository of the image. +- `repository` (string) - The repository of the image. -- `tag` (string) - The tag for the image. By default this is not set. +- `tag` (string) - The tag for the image. By default this is not set. -- `force` (boolean) - If true, this post-processor forcibly tag the image even - if tag name is collided. Default to `false`. +- `force` (boolean) - If true, this post-processor forcibly tag the image even + if tag name is collided. Default to `false`. ## Example diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 4891797e8..237684aa1 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -36,16 +36,16 @@ and deliver them to your team in some fashion. Here is an example workflow: -1. You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box - `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration -3. The post-processor receives the box from the `vagrant` post-processor -4. It then creates the configured version, or verifies the existence of it, on - Vagrant Cloud -5. A provider matching the name of the Vagrant provider is then created -6. The box is uploaded to Vagrant Cloud -7. The upload is verified -8. 
The version is released and available to users of the box +1. You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box + `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on + Vagrant Cloud +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. The version is released and available to users of the box ## Configuration @@ -54,35 +54,35 @@ on Vagrant Cloud, as well as authentication and version information. ### Required: -- `access_token` (string) - Your access token for the Vagrant Cloud API. This - can be generated on your [tokens - page](https://vagrantcloud.com/account/tokens). +- `access_token` (string) - Your access token for the Vagrant Cloud API. This + can be generated on your [tokens + page](https://vagrantcloud.com/account/tokens). -- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant - Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` +- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant + Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` -- `version` (string) - The version number, typically incrementing a - previous version. The version string is validated based on [Semantic - Versioning](http://semver.org/). The string must match a pattern that could be - semver, and doesn't validate that the version comes after your - previous versions. +- `version` (string) - The version number, typically incrementing a + previous version. The version string is validated based on [Semantic + Versioning](http://semver.org/). 
The string must match a pattern that could + be semver, and doesn't validate that the version comes after your + previous versions. ### Optional: -- `no_release` (string) - If set to true, does not release the version on - Vagrant Cloud, making it active. You can manually release the version via the - API or Web UI. Defaults to false. +- `no_release` (string) - If set to true, does not release the version on + Vagrant Cloud, making it active. You can manually release the version via + the API or Web UI. Defaults to false. -- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This - is useful if you're using Vagrant Private Cloud in your own network. Defaults - to `https://vagrantcloud.com/api/v1` +- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This + is useful if you're using Vagrant Private Cloud in your own network. + Defaults to `https://vagrantcloud.com/api/v1` -- `version_description` (string) - Optionally markdown text used as a - full-length and in-depth description of the version, typically for denoting - changes introduced +- `version_description` (string) - Optionally markdown text used as a + full-length and in-depth description of the version, typically for denoting + changes introduced -- `box_download_url` (string) - Optional URL for a self-hosted box. If this is - set the box will not be uploaded to the Vagrant Cloud. +- `box_download_url` (string) - Optional URL for a self-hosted box. If this is + set the box will not be uploaded to the Vagrant Cloud. ## Use with Vagrant Post-Processor diff --git a/website/source/docs/post-processors/vagrant.html.markdown b/website/source/docs/post-processors/vagrant.html.markdown index da1b8daa9..3e55e2549 100644 --- a/website/source/docs/post-processors/vagrant.html.markdown +++ b/website/source/docs/post-processors/vagrant.html.markdown @@ -29,13 +29,13 @@ certain builders into proper boxes for their respective providers. 
Currently, the Vagrant post-processor can create boxes for the following providers. -- AWS -- DigitalOcean -- Hyper-V -- Parallels -- QEMU -- VirtualBox -- VMware +- AWS +- DigitalOcean +- Hyper-V +- Parallels +- QEMU +- VirtualBox +- VMware -> **Support for additional providers** is planned. If the Vagrant post-processor doesn't support creating boxes for a provider you care about, @@ -51,28 +51,28 @@ However, if you want to configure things a bit more, the post-processor does expose some configuration options. The available options are listed below, with more details about certain options in following sections. -- `compression_level` (integer) - An integer representing the compression level - to use when creating the Vagrant box. Valid values range from 0 to 9, with 0 - being no compression and 9 being the best compression. By default, compression - is enabled at level 6. +- `compression_level` (integer) - An integer representing the compression + level to use when creating the Vagrant box. Valid values range from 0 to 9, + with 0 being no compression and 9 being the best compression. By default, + compression is enabled at level 6. -- `include` (array of strings) - Paths to files to include in the Vagrant box. - These files will each be copied into the top level directory of the Vagrant - box (regardless of their paths). They can then be used from the Vagrantfile. +- `include` (array of strings) - Paths to files to include in the Vagrant box. + These files will each be copied into the top level directory of the Vagrant + box (regardless of their paths). They can then be used from the Vagrantfile. -- `keep_input_artifact` (boolean) - If set to true, do not delete the - `output_directory` on a successful build. Defaults to false. +- `keep_input_artifact` (boolean) - If set to true, do not delete the + `output_directory` on a successful build. Defaults to false. -- `output` (string) - The full path to the box file that will be created by - this post-processor. 
This is a [configuration - template](/docs/templates/configuration-templates.html). The variable - `Provider` is replaced by the Vagrant provider the box is for. The variable - `ArtifactId` is replaced by the ID of the input artifact. The variable - `BuildName` is replaced with the name of the build. By default, the value of - this config is `packer_{{.BuildName}}_{{.Provider}}.box`. +- `output` (string) - The full path to the box file that will be created by + this post-processor. This is a [configuration + template](/docs/templates/configuration-templates.html). The variable + `Provider` is replaced by the Vagrant provider the box is for. The variable + `ArtifactId` is replaced by the ID of the input artifact. The variable + `BuildName` is replaced with the name of the build. By default, the value of + this config is `packer_{{.BuildName}}_{{.Provider}}.box`. -- `vagrantfile_template` (string) - Path to a template to use for the - Vagrantfile that is packaged with the box. +- `vagrantfile_template` (string) - Path to a template to use for the + Vagrantfile that is packaged with the box. ## Provider-Specific Overrides diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown index f0fd9588e..300155773 100644 --- a/website/source/docs/post-processors/vsphere.html.markdown +++ b/website/source/docs/post-processors/vsphere.html.markdown @@ -21,35 +21,36 @@ each category, the available configuration keys are alphabetized. Required: -- `cluster` (string) - The cluster to upload the VM to. +- `cluster` (string) - The cluster to upload the VM to. -- `datacenter` (string) - The name of the datacenter within vSphere to add the - VM to. +- `datacenter` (string) - The name of the datacenter within vSphere to add the + VM to. -- `datastore` (string) - The name of the datastore to store this VM. This is - *not required* if `resource_pool` is specified. 
+- `datastore` (string) - The name of the datastore to store this VM. This is + *not required* if `resource_pool` is specified. -- `host` (string) - The vSphere host that will be contacted to perform the - VM upload. +- `host` (string) - The vSphere host that will be contacted to perform the + VM upload. -- `password` (string) - Password to use to authenticate to the vSphere endpoint. +- `password` (string) - Password to use to authenticate to the + vSphere endpoint. -- `resource_pool` (string) - The resource pool to upload the VM to. This is *not - required*. +- `resource_pool` (string) - The resource pool to upload the VM to. This is + *not required*. -- `username` (string) - The username to use to authenticate to the - vSphere endpoint. +- `username` (string) - The username to use to authenticate to the + vSphere endpoint. -- `vm_name` (string) - The name of the VM once it is uploaded. +- `vm_name` (string) - The name of the VM once it is uploaded. Optional: -- `disk_mode` (string) - Target disk format. See `ovftool` manual for - available options. By default, "thick" will be used. +- `disk_mode` (string) - Target disk format. See `ovftool` manual for + available options. By default, "thick" will be used. -- `insecure` (boolean) - Whether or not the connection to vSphere can be done - over an insecure connection. By default this is false. +- `insecure` (boolean) - Whether or not the connection to vSphere can be done + over an insecure connection. By default this is false. -- `vm_folder` (string) - The folder within the datastore to store the VM. +- `vm_folder` (string) - The folder within the datastore to store the VM. -- `vm_network` (string) - The name of the VM network this VM will be added to. +- `vm_network` (string) - The name of the VM network this VM will be added to. 
diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index 5682043c9..7fd084c0a 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -35,83 +35,70 @@ The reference of available configuration options is listed below. Required: -- `playbook_file` (string) - The playbook file to be executed by ansible. This - file must exist on your local system and will be uploaded to the - remote machine. +- `playbook_file` (string) - The playbook file to be executed by ansible. This + file must exist on your local system and will be uploaded to the + remote machine. Optional: -- `command` (string) - The command to invoke ansible. Defaults - to "ansible-playbook". +- `command` (string) - The command to invoke ansible. Defaults + to "ansible-playbook". -- `extra_arguments` (array of strings) - An array of extra arguments to pass to - the ansible command. By default, this is empty. +- `extra_arguments` (array of strings) - An array of extra arguments to pass + to the ansible command. By default, this is empty. -- `inventory_groups` (string) - A comma-separated list of groups to which packer - will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` will - generate an Ansible inventory like: +- `inventory_groups` (string) - A comma-separated list of groups to which + packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` + will generate an Ansible inventory like: - ``` {.text} - [my_group_1] - 127.0.0.1 - [my_group_2] - 127.0.0.1 - ``` +`{.text} [my_group_1] 127.0.0.1 [my_group_2] 127.0.0.1` -- `inventory_file` (string) - The inventory file to be used by ansible. This - file must exist on your local system and will be uploaded to the - remote machine. +- `inventory_file` (string) - The inventory file to be used by ansible. 
This + file must exist on your local system and will be uploaded to the + remote machine. - When using an inventory file, it's also required to `--limit` the hosts to the - specified host you're buiding. The `--limit` argument can be provided in the - `extra_arguments` option. +When using an inventory file, it's also required to `--limit` the hosts to the +specified host you're building. The `--limit` argument can be provided in the +`extra_arguments` option. - An example inventory file may look like: +An example inventory file may look like: - ``` {.text} - [chi-dbservers] - db-01 ansible_connection=local - db-02 ansible_connection=local +\`\`\` {.text} \[chi-dbservers\] db-01 ansible\_connection=local db-02 +ansible\_connection=local - [chi-appservers] - app-01 ansible_connection=local - app-02 ansible_connection=local +\[chi-appservers\] app-01 ansible\_connection=local app-02 +ansible\_connection=local - [chi:children] - chi-dbservers - chi-appservers +\[chi:children\] chi-dbservers chi-appservers - [dbservers:children] - chi-dbservers +\[dbservers:children\] chi-dbservers - [appservers:children] - chi-appservers - ``` +\[appservers:children\] chi-appservers \`\`\` -- `playbook_dir` (string) - a path to the complete ansible directory structure - on your local system to be copied to the remote machine as the - `staging_directory` before all other files and directories. +- `playbook_dir` (string) - a path to the complete ansible directory structure + on your local system to be copied to the remote machine as the + `staging_directory` before all other files and directories. -- `playbook_paths` (array of strings) - An array of paths to playbook files on - your local system. These will be uploaded to the remote machine under - `staging_directory`/playbooks. By default, this is empty. +- `playbook_paths` (array of strings) - An array of paths to playbook files on + your local system. These will be uploaded to the remote machine under + `staging_directory`/playbooks. 
By default, this is empty. -- `group_vars` (string) - a path to the directory containing ansible group - variables on your local system to be copied to the remote machine. By default, - this is empty. +- `group_vars` (string) - a path to the directory containing ansible group + variables on your local system to be copied to the remote machine. By + default, this is empty. -- `host_vars` (string) - a path to the directory containing ansible host - variables on your local system to be copied to the remote machine. By default, - this is empty. +- `host_vars` (string) - a path to the directory containing ansible host + variables on your local system to be copied to the remote machine. By + default, this is empty. -- `role_paths` (array of strings) - An array of paths to role directories on - your local system. These will be uploaded to the remote machine under - `staging_directory`/roles. By default, this is empty. +- `role_paths` (array of strings) - An array of paths to role directories on + your local system. These will be uploaded to the remote machine under + `staging_directory`/roles. By default, this is empty. -- `staging_directory` (string) - The directory where all the configuration of - Ansible by Packer will be placed. By default this - is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to - exist but must have proper permissions so that the SSH user that Packer uses - is able to create directories and write into this folder. If the permissions - are not correct, use a shell provisioner prior to this to configure - it properly. +- `staging_directory` (string) - The directory where all the configuration of + Ansible by Packer will be placed. By default this + is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to + exist but must have proper permissions so that the SSH user that Packer uses + is able to create directories and write into this folder. 
If the permissions + are not correct, use a shell provisioner prior to this to configure + it properly. diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 81d097b7e..aca1a2717 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -40,70 +40,71 @@ is running must have knife on the path and configured globally, i.e, The reference of available configuration options is listed below. No configuration is actually required. -- `chef_environment` (string) - The name of the chef\_environment sent to the - Chef server. By default this is empty and will not use an environment. +- `chef_environment` (string) - The name of the chef\_environment sent to the + Chef server. By default this is empty and will not use an environment. -- `config_template` (string) - Path to a template that will be used for the Chef - configuration file. By default Packer only sets configuration it needs to - match the settings set in the provisioner configuration. If you need to set - configurations that the Packer provisioner doesn't support, then you should - use a custom configuration template. See the dedicated "Chef Configuration" - section below for more details. +- `config_template` (string) - Path to a template that will be used for the + Chef configuration file. By default Packer only sets configuration it needs + to match the settings set in the provisioner configuration. If you need to + set configurations that the Packer provisioner doesn't support, then you + should use a custom configuration template. See the dedicated "Chef + Configuration" section below for more details. -- `execute_command` (string) - The command used to execute Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. 
+- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `install_command` (string) - The command used to install Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `json` (object) - An arbitrary mapping of JSON that will be available as node - attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as + node attributes while running Chef. -- `node_name` (string) - The name of the node to register with the Chef Server. - This is optional and by default is packer-{{uuid}}. +- `node_name` (string) - The name of the node to register with the + Chef Server. This is optional and by default is packer-{{uuid}}. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to install and run Chef are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to install and run Chef are executed with `sudo`. If this is true, + then the sudo will be omitted. -- `run_list` (array of strings) - The [run - list](http://docs.opscode.com/essentials_node_object_run_lists.html) for Chef. - By default this is empty, and will use the run list sent down by the - Chef Server. +- `run_list` (array of strings) - The [run + list](http://docs.opscode.com/essentials_node_object_run_lists.html) + for Chef. By default this is empty, and will use the run list sent down by + the Chef Server. -- `server_url` (string) - The URL to the Chef server. 
This is required. +- `server_url` (string) - The URL to the Chef server. This is required. -- `skip_clean_client` (boolean) - If true, Packer won't remove the client from - the Chef server after it is done running. By default, this is false. +- `skip_clean_client` (boolean) - If true, Packer won't remove the client from + the Chef server after it is done running. By default, this is false. -- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the - Chef server after it is done running. By default, this is false. +- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the + Chef server after it is done running. By default, this is false. -- `skip_install` (boolean) - If true, Chef will not automatically be installed - on the machine using the Opscode omnibus installers. +- `skip_install` (boolean) - If true, Chef will not automatically be installed + on the machine using the Opscode omnibus installers. -- `staging_directory` (string) - This is the directory where all the - configuration of Chef by Packer will be placed. By default this - is "/tmp/packer-chef-client". This directory doesn't need to exist but must - have proper permissions so that the SSH user that Packer uses is able to - create directories and write into this folder. If the permissions are not - correct, use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-client". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -- `client_key` (string) - Path to client key. If not set, this defaults to a - file named client.pem in `staging_directory`. 
+- `client_key` (string) - Path to client key. If not set, this defaults to a + file named client.pem in `staging_directory`. -- `validation_client_name` (string) - Name of the validation client. If not set, - this won't be set in the configuration and the default that Chef uses will - be used. +- `validation_client_name` (string) - Name of the validation client. If not + set, this won't be set in the configuration and the default that Chef uses + will be used. -- `validation_key_path` (string) - Path to the validation key for communicating - with the Chef Server. This will be uploaded to the remote machine. If this is - NOT set, then it is your responsibility via other means (shell - provisioner, etc.) to get a validation key to where Chef expects it. +- `validation_key_path` (string) - Path to the validation key for + communicating with the Chef Server. This will be uploaded to the + remote machine. If this is NOT set, then it is your responsibility via other + means (shell provisioner, etc.) to get a validation key to where Chef + expects it. ## Chef Configuration @@ -135,9 +136,9 @@ This template is a [configuration template](/docs/templates/configuration-templates.html) and has a set of variables available to use: -- `NodeName` - The node name set in the configuration. -- `ServerUrl` - The URL of the Chef Server set in the configuration. -- `ValidationKeyPath` - Path to the validation key, if it is set. +- `NodeName` - The node name set in the configuration. +- `ServerUrl` - The URL of the Chef Server set in the configuration. +- `ValidationKeyPath` - Path to the validation key, if it is set. ## Execute Command @@ -155,10 +156,10 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: -- `ConfigPath` - The path to the Chef configuration file. file. 
-`JsonPath` - The path to the JSON attributes file for the node. -`Sudo` - A boolean of whether to `sudo` the command or not, depending on the - value of the `prevent_sudo` configuration. +- `ConfigPath` - The path to the Chef configuration file. +- `JsonPath` - The path to the JSON attributes file for the node. +- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the + value of the `prevent_sudo` configuration. ## Install Command diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown index 03b55c066..9534c32f1 100644 --- a/website/source/docs/provisioners/chef-solo.html.markdown +++ b/website/source/docs/provisioners/chef-solo.html.markdown @@ -36,71 +36,72 @@ directory relative to your working directory. The reference of available configuration options is listed below. No configuration is actually required, but at least `run_list` is recommended. -- `chef_environment` (string) - The name of the `chef_environment` sent to the - Chef server. By default this is empty and will not use an environment +- `chef_environment` (string) - The name of the `chef_environment` sent to the + Chef server. By default this is empty and will not use an environment -- `config_template` (string) - Path to a template that will be used for the Chef - configuration file. By default Packer only sets configuration it needs to - match the settings set in the provisioner configuration. If you need to set - configurations that the Packer provisioner doesn't support, then you should - use a custom configuration template. See the dedicated "Chef Configuration" - section below for more details. +- `config_template` (string) - Path to a template that will be used for the + Chef configuration file. By default Packer only sets configuration it needs + to match the settings set in the provisioner configuration. 
If you need to + set configurations that the Packer provisioner doesn't support, then you + should use a custom configuration template. See the dedicated "Chef + Configuration" section below for more details. -- `cookbook_paths` (array of strings) - This is an array of paths to "cookbooks" - directories on your local filesystem. These will be uploaded to the remote - machine in the directory specified by the `staging_directory`. By default, - this is empty. +- `cookbook_paths` (array of strings) - This is an array of paths to + "cookbooks" directories on your local filesystem. These will be uploaded to + the remote machine in the directory specified by the `staging_directory`. By + default, this is empty. -- `data_bags_path` (string) - The path to the "data\_bags" directory on your - local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. +- `data_bags_path` (string) - The path to the "data\_bags" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -- `encrypted_data_bag_secret_path` (string) - The path to the file containing - the secret for encrypted data bags. By default, this is empty, so no secret - will be available. +- `encrypted_data_bag_secret_path` (string) - The path to the file containing + the secret for encrypted data bags. By default, this is empty, so no secret + will be available. -- `environments_path` (string) - The path to the "environments" directory on - your local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. +- `environments_path` (string) - The path to the "environments" directory on + your local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. 
-- `execute_command` (string) - The command used to execute Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `install_command` (string) - The command used to install Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `json` (object) - An arbitrary mapping of JSON that will be available as node - attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as + node attributes while running Chef. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to install and run Chef are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to install and run Chef are executed with `sudo`. If this is true, + then the sudo will be omitted. -- `remote_cookbook_paths` (array of strings) - A list of paths on the remote - machine where cookbooks will already exist. These may exist from a previous - provisioner or step. If specified, Chef will be configured to look for - cookbooks here. By default, this is empty. +- `remote_cookbook_paths` (array of strings) - A list of paths on the remote + machine where cookbooks will already exist. These may exist from a previous + provisioner or step. If specified, Chef will be configured to look for + cookbooks here. By default, this is empty. 
-- `roles_path` (string) - The path to the "roles" directory on your - local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. +- `roles_path` (string) - The path to the "roles" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -- `run_list` (array of strings) - The [run - list](https://docs.chef.io/run_lists.html) for Chef. By default this is empty. +- `run_list` (array of strings) - The [run + list](https://docs.chef.io/run_lists.html) for Chef. By default this + is empty. -- `skip_install` (boolean) - If true, Chef will not automatically be installed - on the machine using the Chef omnibus installers. +- `skip_install` (boolean) - If true, Chef will not automatically be installed + on the machine using the Chef omnibus installers. -- `staging_directory` (string) - This is the directory where all the - configuration of Chef by Packer will be placed. By default this - is "/tmp/packer-chef-solo". This directory doesn't need to exist but must have - proper permissions so that the SSH user that Packer uses is able to create - directories and write into this folder. If the permissions are not correct, - use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-solo". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. 
## Chef Configuration @@ -119,14 +120,14 @@ This template is a [configuration template](/docs/templates/configuration-templates.html) and has a set of variables available to use: -- `ChefEnvironment` - The current enabled environment. Only non-empty if the - environment path is set. -- `CookbookPaths` is the set of cookbook paths ready to embedded directly into a - Ruby array to configure Chef. -- `DataBagsPath` is the path to the data bags folder. -- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret -- `EnvironmentsPath` - The path to the environments folder. -- `RolesPath` - The path to the roles folder. +- `ChefEnvironment` - The current enabled environment. Only non-empty if the + environment path is set. +- `CookbookPaths` is the set of cookbook paths ready to be embedded directly into + a Ruby array to configure Chef. +- `DataBagsPath` is the path to the data bags folder. +- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret +- `EnvironmentsPath` - The path to the environments folder. +- `RolesPath` - The path to the roles folder. ## Execute Command @@ -144,10 +145,10 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: -- `ConfigPath` - The path to the Chef configuration file. file. -- `JsonPath` - The path to the JSON attributes file for the node. -- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the - value of the `prevent_sudo` configuration. +- `ConfigPath` - The path to the Chef configuration file. +- `JsonPath` - The path to the JSON attributes file for the node. +- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the + value of the `prevent_sudo` configuration. 
## Install Command diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 3439b4dd6..7799721a5 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -32,19 +32,19 @@ The file provisioner can upload both single files and complete directories. The available configuration options are listed below. All elements are required. -- `source` (string) - The path to a local file or directory to upload to - the machine. The path can be absolute or relative. If it is relative, it is - relative to the working directory when Packer is executed. If this is a - directory, the existence of a trailing slash is important. Read below on - uploading directories. +- `source` (string) - The path to a local file or directory to upload to + the machine. The path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. If this is a + directory, the existence of a trailing slash is important. Read below on + uploading directories. -- `destination` (string) - The path where the file will be uploaded to in - the machine. This value must be a writable location and any parent directories - must already exist. +- `destination` (string) - The path where the file will be uploaded to in + the machine. This value must be a writable location and any parent + directories must already exist. -- `direction` (string) - The direction of the file transfer. This defaults to - "upload." If it is set to "download" then the file "source" in the machine wll - be downloaded locally to "destination" +- `direction` (string) - The direction of the file transfer. This defaults to + "upload." 
If it is set to "download" then the file "source" in the machine + will be downloaded locally to "destination" ## Directory Uploads diff --git a/website/source/docs/provisioners/powershell.html.markdown b/website/source/docs/provisioners/powershell.html.markdown index ebc56ec4c..4cd862616 100644 --- a/website/source/docs/provisioners/powershell.html.markdown +++ b/website/source/docs/provisioners/powershell.html.markdown @@ -32,52 +32,53 @@ required element is either "inline" or "script". Every other option is optional. Exactly *one* of the following is required: -- `inline` (array of strings) - This is an array of commands to execute. The - commands are concatenated by newlines and turned into a single file, so they - are all executed within the same context. This allows you to change - directories in one command and use something in the directory in the next and - so on. Inline scripts are the easiest way to pull off simple tasks within - the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next + and so on. Inline scripts are the easiest way to pull off simple tasks + within the machine. -- `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative to - the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in + the machine. This path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. -- `scripts` (array of strings) - An array of scripts to execute. The scripts - will be uploaded and executed in the order specified. 
Each script is executed - in isolation, so state such as variables from one script won't carry on to - the next. +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be uploaded and executed in the order specified. Each script is + executed in isolation, so state such as variables from one script won't + carry on to the next. Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary files, - and Packer should therefore not convert Windows line endings to Unix line - endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -- `environment_vars` (array of strings) - An array of key/value pairs to inject - prior to the execute\_command. The format should be `key=value`. Packer - injects some environmental variables by default into the environment, as well, - which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -- `execute_command` (string) - The command to use to execute the script. By - default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. - The value of this is treated as [configuration - template](/docs/templates/configuration-templates.html). There are two - available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. 
+ The value of this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -- `elevated_user` and `elevated_password` (string) - If specified, the - PowerShell script will be run with elevated privileges using the given - Windows user. +- `elevated_user` and `elevated_password` (string) - If specified, the + PowerShell script will be run with elevated privileges using the given + Windows user. -- `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "/tmp/script.sh". This value must be a writable - location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -- `start_retry_timeout` (string) - The amount of time to attempt to *start* the - remote process. By default this is "5m" or 5 minutes. This setting exists in - order to deal with times when SSH may restart, such as a system reboot. Set - this to a higher value if reboots take a longer amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. -- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By - default this is just 0. +- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By + default this is just 0. 
diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index ac5f4f628..7ef13265e 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -45,59 +45,58 @@ The reference of available configuration options is listed below. Required parameters: -- `manifest_file` (string) - This is either a path to a puppet manifest - (`.pp` file) *or* a directory containing multiple manifests that puppet will - apply (the ["main - manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)). - These file(s) must exist on your local system and will be uploaded to the - remote machine. +- `manifest_file` (string) - This is either a path to a puppet manifest + (`.pp` file) *or* a directory containing multiple manifests that puppet will + apply (the ["main + manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)). + These file(s) must exist on your local system and will be uploaded to the + remote machine. Optional parameters: -- `execute_command` (string) - The command used to execute Puppet. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `execute_command` (string) - The command used to execute Puppet. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `facter` (object of key/value strings) - Additional - [facts](http://puppetlabs.com/puppet/related-projects/facter) to make - available when Puppet is running. +- `facter` (object of key/value strings) - Additional + [facts](http://puppetlabs.com/puppet/related-projects/facter) to make + available when Puppet is running. 
-- `hiera_config_path` (string) - The path to a local file with hiera - configuration to be uploaded to the remote machine. Hiera data directories - must be uploaded using the file provisioner separately. +- `hiera_config_path` (string) - The path to a local file with hiera + configuration to be uploaded to the remote machine. Hiera data directories + must be uploaded using the file provisioner separately. -- `manifest_dir` (string) - The path to a local directory with manifests to be - uploaded to the remote machine. This is useful if your main manifest file - uses imports. This directory doesn't necessarily contain the `manifest_file`. - It is a separate directory that will be set as the "manifestdir" setting - on Puppet. +- `manifest_dir` (string) - The path to a local directory with manifests to be + uploaded to the remote machine. This is useful if your main manifest file + uses imports. This directory doesn't necessarily contain the + `manifest_file`. It is a separate directory that will be set as the + "manifestdir" setting on Puppet. - \~> `manifest_dir` is passed to `puppet apply` as the - `--manifestdir` option. This option was deprecated in puppet 3.6, and removed - in puppet 4.0. If you have multiple manifests you should use - `manifest_file` instead. +\~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. +This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you have +multiple manifests you should use `manifest_file` instead. -- `module_paths` (array of strings) - This is an array of paths to module - directories on your local filesystem. These will be uploaded to the - remote machine. By default, this is empty. +- `module_paths` (array of strings) - This is an array of paths to module + directories on your local filesystem. These will be uploaded to the + remote machine. By default, this is empty. 
-- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, then the - sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -- `staging_directory` (string) - This is the directory where all the - configuration of Puppet by Packer will be placed. By default this - is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but - must have proper permissions so that the SSH user that Packer uses is able to - create directories and write into this folder. If the permissions are not - correct, use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but + must have proper permissions so that the SSH user that Packer uses is able + to create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -- `working_directory` (string) - This is the directory from which the puppet - command will be run. When using hiera with a relative path, this option allows - to ensure that the paths are working properly. If not specified, defaults to - the value of specified `staging_directory` (or its default value if not - specified either). +- `working_directory` (string) - This is the directory from which the puppet + command will be run. When using hiera with a relative path, this option + allows to ensure that the paths are working properly. If not specified, + defaults to the value of specified `staging_directory` (or its default value + if not specified either). 
## Execute Command @@ -119,15 +118,15 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: -- `WorkingDir` - The path from which Puppet will be executed. -- `FacterVars` - Shell-friendly string of environmental variables used to set - custom facts configured for this provisioner. -- `HieraConfigPath` - The path to a hiera configuration file. -- `ManifestFile` - The path on the remote machine to the manifest file for - Puppet to use. -- `ModulePath` - The paths to the module directories. -- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the - value of the `prevent_sudo` configuration. +- `WorkingDir` - The path from which Puppet will be executed. +- `FacterVars` - Shell-friendly string of environmental variables used to set + custom facts configured for this provisioner. +- `HieraConfigPath` - The path to a hiera configuration file. +- `ManifestFile` - The path on the remote machine to the manifest file for + Puppet to use. +- `ModulePath` - The paths to the module directories. +- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the + value of the `prevent_sudo` configuration. ## Default Facts @@ -135,10 +134,10 @@ In addition to being able to specify custom Facter facts using the `facter` configuration, the provisioner automatically defines certain commonly useful facts: -- `packer_build_name` is set to the name of the build that Packer is running. - This is most useful when Packer is making multiple builds and you want to - distinguish them in your Hiera hierarchy. +- `packer_build_name` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them in your Hiera hierarchy. 
-- `packer_builder_type` is the type of the builder that was used to create the - machine that Puppet is running on. This is useful if you want to run only - certain parts of your Puppet code on systems built with certain builders. +- `packer_builder_type` is the type of the builder that was used to create the + machine that Puppet is running on. This is useful if you want to run only + certain parts of your Puppet code on systems built with certain builders. diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown index 32bcadbe8..bf469956b 100644 --- a/website/source/docs/provisioners/puppet-server.html.markdown +++ b/website/source/docs/provisioners/puppet-server.html.markdown @@ -41,36 +41,36 @@ The reference of available configuration options is listed below. The provisioner takes various options. None are strictly required. They are listed below: -- `client_cert_path` (string) - Path to the client certificate for the node on - your disk. This defaults to nothing, in which case a client cert won't - be uploaded. +- `client_cert_path` (string) - Path to the client certificate for the node on + your disk. This defaults to nothing, in which case a client cert won't + be uploaded. -- `client_private_key_path` (string) - Path to the client private key for the - node on your disk. This defaults to nothing, in which case a client private - key won't be uploaded. +- `client_private_key_path` (string) - Path to the client private key for the + node on your disk. This defaults to nothing, in which case a client private + key won't be uploaded. -- `facter` (object of key/value strings) - Additional Facter facts to make - available to the Puppet run. +- `facter` (object of key/value strings) - Additional Facter facts to make + available to the Puppet run. -- `ignore_exit_codes` (boolean) - If true, Packer will never consider the - provisioner a failure. 
+- `ignore_exit_codes` (boolean) - If true, Packer will never consider the + provisioner a failure. -- `options` (string) - Additional command line options to pass to `puppet agent` - when Puppet is ran. +- `options` (string) - Additional command line options to pass to + `puppet agent` when Puppet is ran. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, then the - sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -- `puppet_node` (string) - The name of the node. If this isn't set, the fully - qualified domain name will be used. +- `puppet_node` (string) - The name of the node. If this isn't set, the fully + qualified domain name will be used. -- `puppet_server` (string) - Hostname of the Puppet server. By default "puppet" - will be used. +- `puppet_server` (string) - Hostname of the Puppet server. By default + "puppet" will be used. -- `staging_directory` (string) - This is the directory where all the - configuration of Puppet by Packer will be placed. By default this - is "/tmp/packer-puppet-server". This directory doesn't need to exist but must - have proper permissions so that the SSH user that Packer uses is able to - create directories and write into this folder. If the permissions are not - correct, use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-server". This directory doesn't need to exist but + must have proper permissions so that the SSH user that Packer uses is able + to create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. 
diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index cc1ab1f7b..84171a071 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -31,28 +31,28 @@ required argument is the path to your local salt state tree. Optional: -- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage - is somewhat documented on - [github](https://github.com/saltstack/salt-bootstrap), but the [script - itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) - has more detailed usage instructions. By default, no arguments are sent to - the script. +- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage + is somewhat documented on + [github](https://github.com/saltstack/salt-bootstrap), but the [script + itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) + has more detailed usage instructions. By default, no arguments are sent to + the script. -- `local_pillar_roots` (string) - The path to your local [pillar - roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). - This will be uploaded to the `/srv/pillar` on the remote. +- `local_pillar_roots` (string) - The path to your local [pillar + roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). + This will be uploaded to the `/srv/pillar` on the remote. -- `local_state_tree` (string) - The path to your local [state - tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). - This will be uploaded to the `/srv/salt` on the remote. +- `local_state_tree` (string) - The path to your local [state + tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). + This will be uploaded to the `/srv/salt` on the remote. 
-- `minion_config` (string) - The path to your local [minion - config](http://docs.saltstack.com/topics/configuration.html). This will be - uploaded to the `/etc/salt` on the remote. +- `minion_config` (string) - The path to your local [minion + config](http://docs.saltstack.com/topics/configuration.html). This will be + uploaded to the `/etc/salt` on the remote. -- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt - bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set - this to true to skip this step. +- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt + bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set + this to true to skip this step. -- `temp_config_dir` (string) - Where your local state tree will be copied before - moving to the `/srv/salt` directory. Default is `/tmp/salt`. +- `temp_config_dir` (string) - Where your local state tree will be copied + before moving to the `/srv/salt` directory. Default is `/tmp/salt`. diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index 97015a847..9cd05ef12 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -37,55 +37,56 @@ required element is either "inline" or "script". Every other option is optional. Exactly *one* of the following is required: -- `inline` (array of strings) - This is an array of commands to execute. The - commands are concatenated by newlines and turned into a single file, so they - are all executed within the same context. This allows you to change - directories in one command and use something in the directory in the next and - so on. Inline scripts are the easiest way to pull off simple tasks within - the machine. +- `inline` (array of strings) - This is an array of commands to execute. 
The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next + and so on. Inline scripts are the easiest way to pull off simple tasks + within the machine. -- `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative to - the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in + the machine. This path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. -- `scripts` (array of strings) - An array of scripts to execute. The scripts - will be uploaded and executed in the order specified. Each script is executed - in isolation, so state such as variables from one script won't carry on to - the next. +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be uploaded and executed in the order specified. Each script is + executed in isolation, so state such as variables from one script won't + carry on to the next. Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary files, - and Packer should therefore not convert Windows line endings to Unix line - endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -- `environment_vars` (array of strings) - An array of key/value pairs to inject - prior to the execute\_command. The format should be `key=value`. Packer - injects some environmental variables by default into the environment, as well, - which are covered in the section below. 
+- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -- `execute_command` (string) - The command to use to execute the script. By - default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of - this is treated as [configuration - template](/docs/templates/configuration-templates.html). There are two - available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value + of this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -- `inline_shebang` (string) - The - [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when - running commands specified by `inline`. By default, this is `/bin/sh -e`. If - you're not using `inline`, then this configuration has no effect. - **Important:** If you customize this, be sure to include something like the - `-e` flag, otherwise individual steps failing won't fail the provisioner. +- `inline_shebang` (string) - The + [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when + running commands specified by `inline`. By default, this is `/bin/sh -e`. If + you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like the + `-e` flag, otherwise individual steps failing won't fail the provisioner. 
-- `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "/tmp/script.sh". This value must be a writable - location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -- `start_retry_timeout` (string) - The amount of time to attempt to *start* the - remote process. By default this is "5m" or 5 minutes. This setting exists in - order to deal with times when SSH may restart, such as a system reboot. Set - this to a higher value if reboots take a longer amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. ## Execute Command Example @@ -128,13 +129,13 @@ In addition to being able to specify custom environmental variables using the `environment_vars` configuration, the provisioner automatically defines certain commonly useful environmental variables: -- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. - This is most useful when Packer is making multiple builds and you want to - distinguish them slightly from a common provisioning script. +- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them slightly from a common provisioning script. -- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the - machine that the script is running on. This is useful if you want to run only - certain parts of the script on systems built with certain builders. 
+- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the + machine that the script is running on. This is useful if you want to run + only certain parts of the script on systems built with certain builders. ## Handling Reboots @@ -181,46 +182,41 @@ provisioner](/docs/provisioners/file.html) (more secure) or using `ssh-keyscan` to populate the file (less secure). An example of the latter accessing github would be: - { - "type": "shell", - "inline": [ - "sudo apt-get install -y git", - "ssh-keyscan github.com >> ~/.ssh/known_hosts", - "git clone git@github.com:exampleorg/myprivaterepo.git" - ] - } +{ "type": "shell", "inline": \[ "sudo apt-get install -y git", "ssh-keyscan +github.com >> \~/.ssh/known\_hosts", "git clone +git@github.com:exampleorg/myprivaterepo.git" \] } ## Troubleshooting *My shell script doesn't work correctly on Ubuntu* -- On Ubuntu, the `/bin/sh` shell is - [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has - [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in - it, then put `#!/bin/bash` at the top of your script. Differences between dash - and bash can be found on the - [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. +- On Ubuntu, the `/bin/sh` shell is + [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script + has [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands + in it, then put `#!/bin/bash` at the top of your script. Differences between + dash and bash can be found on the + [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. *My shell works when I login but fails with the shell provisioner* -- See the above tip. More than likely, your login shell is using `/bin/bash` - while the provisioner is using `/bin/sh`. +- See the above tip. More than likely, your login shell is using `/bin/bash` + while the provisioner is using `/bin/sh`. 
*My installs hang when using `apt-get` or `yum`* -- Make sure you add a `-y` to the command to prevent it from requiring user - input before proceeding. +- Make sure you add a `-y` to the command to prevent it from requiring user + input before proceeding. *How do I tell what my shell script is doing?* -- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`) - will echo the script statements as it is executing. +- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`) + will echo the script statements as it is executing. *My builds don't always work the same* -- Some distributions start the SSH daemon before other core services which can - create race conditions. Your first provisioner can tell the machine to wait - until it completely boots. +- Some distributions start the SSH daemon before other core services which can + create race conditions. Your first provisioner can tell the machine to wait + until it completely boots. ``` {.javascript} { diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index 9bc8f835e..c78f13956 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -57,17 +57,17 @@ While some configuration settings have local variables specific to only that configuration, a set of functions are available globally for use in *any string* in Packer templates. These are listed below for reference. -- `build_name` - The name of the build being run. -- `build_type` - The type of the builder being used currently. -- `isotime [FORMAT]` - UTC time, which can be - [formatted](http://golang.org/pkg/time/#example_Time_Format). See more - examples below. -- `lower` - Lowercases the string. -- `pwd` - The working directory while executing Packer. -- `template_dir` - The directory to the template for the build. 
-- `timestamp` - The current Unix timestamp in UTC. -- `uuid` - Returns a random UUID. -- `upper` - Uppercases the string. +- `build_name` - The name of the build being run. +- `build_type` - The type of the builder being used currently. +- `isotime [FORMAT]` - UTC time, which can be + [formatted](http://golang.org/pkg/time/#example_Time_Format). See more + examples below. +- `lower` - Lowercases the string. +- `pwd` - The working directory while executing Packer. +- `template_dir` - The directory to the template for the build. +- `timestamp` - The current Unix timestamp in UTC. +- `uuid` - Returns a random UUID. +- `upper` - Uppercases the string. ### isotime Format @@ -112,7 +112,8 @@ Timezone Numeric -- +- + 01 @@ -147,19 +148,24 @@ Monday (Mon) January (Jan) -- +- + -- +- + -- +- + -- +- + -- +- + MST @@ -205,6 +211,6 @@ Please note that double quote characters need escaping inside of templates: Specific to Amazon builders: -- `clean_ami_name` - AMI names can only contain certain characters. This - function will replace illegal characters with a '-" character. Example usage - since ":" is not a legal AMI name is: `{{isotime | clean_ami_name}}`. +- `clean_ami_name` - AMI names can only contain certain characters. This + function will replace illegal characters with a '-" character. Example usage + since ":" is not a legal AMI name is: `{{isotime | clean_ami_name}}`. diff --git a/website/source/docs/templates/introduction.html.markdown b/website/source/docs/templates/introduction.html.markdown index 1d67ea196..c48dc6c73 100644 --- a/website/source/docs/templates/introduction.html.markdown +++ b/website/source/docs/templates/introduction.html.markdown @@ -27,40 +27,41 @@ A template is a JSON object that has a set of keys configuring various components of Packer. The available keys within a template are listed below. Along with each key, it is noted whether it is required or not. 
-- `builders` (*required*) is an array of one or more objects that defines the - builders that will be used to create machine images for this template, and - configures each of those builders. For more information on how to define and - configure a builder, read the sub-section on [configuring builders in - templates](/docs/templates/builders.html). +- `builders` (*required*) is an array of one or more objects that defines the + builders that will be used to create machine images for this template, and + configures each of those builders. For more information on how to define and + configure a builder, read the sub-section on [configuring builders in + templates](/docs/templates/builders.html). -- `description` (optional) is a string providing a description of what the - template does. This output is used only in the [inspect - command](/docs/command-line/inspect.html). +- `description` (optional) is a string providing a description of what the + template does. This output is used only in the [inspect + command](/docs/command-line/inspect.html). -- `min_packer_version` (optional) is a string that has a minimum Packer version - that is required to parse the template. This can be used to ensure that proper - versions of Packer are used with the template. A max version can't be - specified because Packer retains backwards compatibility with `packer fix`. +- `min_packer_version` (optional) is a string that has a minimum Packer + version that is required to parse the template. This can be used to ensure + that proper versions of Packer are used with the template. A max version + can't be specified because Packer retains backwards compatibility with + `packer fix`. -- `post-processors` (optional) is an array of one or more objects that defines - the various post-processing steps to take with the built images. If not - specified, then no post-processing will be done. 
For more information on what - post-processors do and how they're defined, read the sub-section on - [configuring post-processors in - templates](/docs/templates/post-processors.html). +- `post-processors` (optional) is an array of one or more objects that defines + the various post-processing steps to take with the built images. If not + specified, then no post-processing will be done. For more information on + what post-processors do and how they're defined, read the sub-section on + [configuring post-processors in + templates](/docs/templates/post-processors.html). -- `provisioners` (optional) is an array of one or more objects that defines the - provisioners that will be used to install and configure software for the - machines created by each of the builders. If it is not specified, then no - provisioners will be run. For more information on how to define and configure - a provisioner, read the sub-section on [configuring provisioners in - templates](/docs/templates/provisioners.html). +- `provisioners` (optional) is an array of one or more objects that defines + the provisioners that will be used to install and configure software for the + machines created by each of the builders. If it is not specified, then no + provisioners will be run. For more information on how to define and + configure a provisioner, read the sub-section on [configuring provisioners + in templates](/docs/templates/provisioners.html). -- `variables` (optional) is an array of one or more key/value strings that - defines user variables contained in the template. If it is not specified, then - no variables are defined. For more information on how to define and use user - variables, read the sub-section on [user variables in - templates](/docs/templates/user-variables.html). +- `variables` (optional) is an array of one or more key/value strings that + defines user variables contained in the template. If it is not specified, + then no variables are defined. 
For more information on how to define and use + user variables, read the sub-section on [user variables in + templates](/docs/templates/user-variables.html). ## Comments diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index 3ca2c2de2..b46bef3e8 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -37,31 +37,31 @@ each category, the available configuration keys are alphabetized. ### Required -- `name` (string) - Name of the build configuration in the build service. If - this doesn't exist, it will be created (by default). +- `name` (string) - Name of the build configuration in the build service. If + this doesn't exist, it will be created (by default). ### Optional -- `address` (string) - The address of the build service to use. By default this - is `https://atlas.hashicorp.com`. +- `address` (string) - The address of the build service to use. By default + this is `https://atlas.hashicorp.com`. -- `base_dir` (string) - The base directory of the files to upload. This will be - the current working directory when the build service executes your template. - This path is relative to the template. +- `base_dir` (string) - The base directory of the files to upload. This will + be the current working directory when the build service executes + your template. This path is relative to the template. -- `include` (array of strings) - Glob patterns to include relative to the - `base_dir`. If this is specified, only files that match the include pattern - are included. +- `include` (array of strings) - Glob patterns to include relative to the + `base_dir`. If this is specified, only files that match the include pattern + are included. -- `exclude` (array of strings) - Glob patterns to exclude relative to the - `base_dir`. +- `exclude` (array of strings) - Glob patterns to exclude relative to the + `base_dir`. 
-- `token` (string) - An access token to use to authenticate to the - build service. +- `token` (string) - An access token to use to authenticate to the + build service. -- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and - only upload the files that are tracked by the VCS. This is useful for - automatically excluding ignored files. This defaults to false. +- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and + only upload the files that are tracked by the VCS. This is useful for + automatically excluding ignored files. This defaults to false. ## Examples diff --git a/website/source/intro/platforms.html.markdown b/website/source/intro/platforms.html.markdown index 586c0c4ec..86d71545e 100644 --- a/website/source/intro/platforms.html.markdown +++ b/website/source/intro/platforms.html.markdown @@ -33,40 +33,42 @@ is noted. They are listed in alphabetical order. For more detailed information on supported configuration parameters and usage, please see the appropriate [documentation page within the documentation section](/docs). -- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within - [EC2](http://aws.amazon.com/ec2/), optionally distributed to multiple regions. +- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within + [EC2](http://aws.amazon.com/ec2/), optionally distributed to + multiple regions. -- ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) - that can be used to start a pre-configured DigitalOcean instance of any size. +- ***DigitalOcean***. Snapshots for + [DigitalOcean](http://www.digitalocean.com/) that can be used to start a + pre-configured DigitalOcean instance of any size. -- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used - to start a pre-configured Docker instance. +- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used + to start a pre-configured Docker instance. 
-- ***Google Compute Engine***. Snapshots for [Google Compute - Engine](https://cloud.google.com/products/compute-engine) that can be used to - start a pre-configured Google Compute Engine instance. +- ***Google Compute Engine***. Snapshots for [Google Compute + Engine](https://cloud.google.com/products/compute-engine) that can be used + to start a pre-configured Google Compute Engine instance. -- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can be - used to start pre-configured OpenStack servers. +- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can + be used to start pre-configured OpenStack servers. -- ***Parallels (PVM)***. Exported virtual machines for - [Parallels](http://www.parallels.com/downloads/desktop/), including virtual - machine metadata such as RAM, CPUs, etc. These virtual machines are portable - and can be started on any platform Parallels runs on. +- ***Parallels (PVM)***. Exported virtual machines for + [Parallels](http://www.parallels.com/downloads/desktop/), including virtual + machine metadata such as RAM, CPUs, etc. These virtual machines are portable + and can be started on any platform Parallels runs on. -- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or - [Xen](http://www.xenproject.org/) that can be used to start pre-configured KVM - or Xen instances. +- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or + [Xen](http://www.xenproject.org/) that can be used to start pre-configured + KVM or Xen instances. -- ***VirtualBox (OVF)***. Exported virtual machines for - [VirtualBox](https://www.virtualbox.org/), including virtual machine metadata - such as RAM, CPUs, etc. These virtual machines are portable and can be started - on any platform VirtualBox runs on. +- ***VirtualBox (OVF)***. Exported virtual machines for + [VirtualBox](https://www.virtualbox.org/), including virtual machine + metadata such as RAM, CPUs, etc. 
These virtual machines are portable and can + be started on any platform VirtualBox runs on. -- ***VMware (VMX)***. Exported virtual machines for - [VMware](http://www.vmware.com/) that can be run within any desktop products - such as Fusion, Player, or Workstation, as well as server products such - as vSphere. +- ***VMware (VMX)***. Exported virtual machines for + [VMware](http://www.vmware.com/) that can be run within any desktop products + such as Fusion, Player, or Workstation, as well as server products such + as vSphere. As previously mentioned, these are just the target image types that Packer ships with out of the box. You can always [extend Packer through From 1e9459a0675ddf635b04860a4e6afee62e1d8be0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 23 Jul 2015 00:02:18 -0700 Subject: [PATCH 023/100] Changed push docs to more clearly explain how they work with Atlas --- .../docs/command-line/push.html.markdown | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 764333967..96e5b3e20 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -1,28 +1,21 @@ --- description: | - The `packer push` Packer command takes a template and pushes it to a build - service that will automatically build this Packer template. + The `packer push` command uploads a template and other required files to the Atlas build service, which will run your packer build for you. layout: docs page_title: 'Push - Command-Line' ... # Command-Line: Push -The `packer push` Packer command takes a template and pushes it to a Packer -build service such as [HashiCorp's Atlas](https://atlas.hashicorp.com). The -build service will automatically build your Packer template and expose the -artifacts. 
+The `packer push` command uploads a template and other required files to the Atlas service, which will run your packer build for you. [Learn more about Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) -External build services such as HashiCorp's Atlas make it easy to iterate on -Packer templates, especially when the builder you are running may not be easily -accessable (such as developing `qemu` builders on Mac or Windows). +Running builds remotely makes it easier to iterate on packer builds that are not supported on your operating system, for example, building docker or QEMU while developing on Mac or Windows. Also, the hard work of building VMs is offloaded to dedicated servers with more CPU, memory, and network resources. -!> The Packer build service will receive the raw copy of your Packer template -when you push. **If you have sensitive data in your Packer template, you should -move that data into Packer variables or environment variables!** +When you use push to run a build in Atlas, you may also want to store your build artifacts in Atlas. In order to do that you will also need to configure the [Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and both the post-processor and push commands can be used independently. -For the `push` command to work, the [push -configuration](/docs/templates/push.html) must be completed within the template. +!> The push command uploads your template and other files, like provisioning scripts, to Atlas. Take care not to upload files that you don't intend to, like secrets or large binaries. **If you have secrets in your Packer template, you should [move them into environment variables](https://packer.io/docs/templates/user-variables.html).** + +Most push behavior is [configured in your packer template](/docs/templates/push.html). You can override or supplement your configuration using the options below. 
## Options @@ -30,12 +23,16 @@ configuration](/docs/templates/push.html) must be completed within the template. template much like a VCS commit message. This message will be passed to the Packer build service. This option is also available as a short option `-m`. -- `-token` - An access token for authenticating the push to the Packer build - service such as Atlas. This can also be specified within the push - configuration in the template. +- `-token` - Your access token for the Atlas API. + +-> Login to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `-token` on the command line. - `-name` - The name of the build in the service. This typically looks like - `hashicorp/precise64`. + `hashicorp/precise64`, which follows the form `/`. This must be specified here or in your template. + +- `-var` - Set a variable in your packer template. This option can be used multiple times. This is useful for setting version numbers for your build. + +- `-var-file` - Set template variables from a file. ## Examples From a77ee557ac8dff754a025ffcc827cd91436fa443 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 14:49:22 -0700 Subject: [PATCH 024/100] Starting rework of atlas post-processor page --- .../docs/post-processors/atlas.html.markdown | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 4f2cb3640..8839830d2 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -11,9 +11,9 @@ page_title: 'Atlas Post-Processor' Type: `atlas` -The Atlas post-processor for Packer receives an artifact from a Packer build and -uploads it to Atlas. 
[Atlas](https://atlas.hashicorp.com) hosts and serves -artifacts, allowing you to version and distribute them in a simple way. +The Atlas post-processor uploads artifacts from your packer builds to Atlas for hosting. Artifacts hosted in Atlas are are automatically made available for use with Vagrant and Terraform, and Atlas provides additional features for managing versions and releases. [Learn more about packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) + +You can also use the push command to [run packer builds in Atlas](/docs/command-line/push.html). The push command and Atlas post-processor can be used together or independently. ## Workflow @@ -25,8 +25,7 @@ location in Atlas. Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI - builder](/docs/builders/amazon.html) +1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) 2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the @@ -40,24 +39,19 @@ The configuration allows you to specify and access the artifact in Atlas. ### Required: -- `token` (string) - Your access token for the Atlas API. This can be - generated on your [tokens - page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can - export your Atlas token as an environmental variable and remove it from - the configuration. +- `token` (string) - Your access token for the Atlas API. + +-> Login to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `token` configuration option. - `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. 
- You must have access to the organization, hashicorp in this example, in + You must have access to the organization—hashicorp in this example—in order to add an artifact to the organization in Atlas. - `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. --> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas -post-processor](/docs/post-processors/atlas.html). - ### Optional: - `atlas_url` (string) - Override the base URL for Atlas. This is useful if From 7a6eb966c0d85a115990fa76e13704b376464b1a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 14:49:44 -0700 Subject: [PATCH 025/100] We actually use PACKER_ACC not TF_ACC --- helper/builder/testing/testing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/builder/testing/testing.go b/helper/builder/testing/testing.go index 522d7a265..0bfb136ae 100644 --- a/helper/builder/testing/testing.go +++ b/helper/builder/testing/testing.go @@ -64,7 +64,7 @@ type TestT interface { // Test performs an acceptance test on a backend with the given test case. // -// Tests are not run unless an environmental variable "TF_ACC" is +// Tests are not run unless an environmental variable "PACKER_ACC" is // set to some non-empty value. This is to avoid test cases surprising // a user by creating real resources. 
// From 30850b851d6dcad5979abd6d9787918c9e2a3c0f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 14:54:46 -0700 Subject: [PATCH 026/100] Reformat --- .../docs/command-line/push.html.markdown | 38 ++++++++++++++----- .../docs/post-processors/atlas.html.markdown | 22 ++++++++--- 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 96e5b3e20..06e5a3c98 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -1,21 +1,36 @@ --- description: | - The `packer push` command uploads a template and other required files to the Atlas build service, which will run your packer build for you. + The `packer push` command uploads a template and other required files to the + Atlas build service, which will run your packer build for you. layout: docs page_title: 'Push - Command-Line' ... # Command-Line: Push -The `packer push` command uploads a template and other required files to the Atlas service, which will run your packer build for you. [Learn more about Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) +The `packer push` command uploads a template and other required files to the +Atlas service, which will run your packer build for you. [Learn more about +Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) -Running builds remotely makes it easier to iterate on packer builds that are not supported on your operating system, for example, building docker or QEMU while developing on Mac or Windows. Also, the hard work of building VMs is offloaded to dedicated servers with more CPU, memory, and network resources. +Running builds remotely makes it easier to iterate on packer builds that are not +supported on your operating system, for example, building docker or QEMU while +developing on Mac or Windows. 
Also, the hard work of building VMs is offloaded +to dedicated servers with more CPU, memory, and network resources. -When you use push to run a build in Atlas, you may also want to store your build artifacts in Atlas. In order to do that you will also need to configure the [Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and both the post-processor and push commands can be used independently. +When you use push to run a build in Atlas, you may also want to store your build +artifacts in Atlas. In order to do that you will also need to configure the +[Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and +both the post-processor and push commands can be used independently. -!> The push command uploads your template and other files, like provisioning scripts, to Atlas. Take care not to upload files that you don't intend to, like secrets or large binaries. **If you have secrets in your Packer template, you should [move them into environment variables](https://packer.io/docs/templates/user-variables.html).** +!> The push command uploads your template and other files, like provisioning +scripts, to Atlas. Take care not to upload files that you don't intend to, like +secrets or large binaries. **If you have secrets in your Packer template, you +should [move them into environment +variables](https://packer.io/docs/templates/user-variables.html).** -Most push behavior is [configured in your packer template](/docs/templates/push.html). You can override or supplement your configuration using the options below. +Most push behavior is [configured in your packer +template](/docs/templates/push.html). You can override or supplement your +configuration using the options below. ## Options @@ -25,12 +40,17 @@ Most push behavior is [configured in your packer template](/docs/templates/push. - `-token` - Your access token for the Atlas API. --> Login to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). 
The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `-token` on the command line. +-> Login to Atlas to [generate an Atlas +Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to +configure your token is to set it to the `ATLAS_TOKEN` environment variable, but +you can also use `-token` on the command line. - `-name` - The name of the build in the service. This typically looks like - `hashicorp/precise64`, which follows the form `/`. This must be specified here or in your template. + `hashicorp/precise64`, which follows the form `/`. This + must be specified here or in your template. -- `-var` - Set a variable in your packer template. This option can be used multiple times. This is useful for setting version numbers for your build. +- `-var` - Set a variable in your packer template. This option can be used + multiple times. This is useful for setting version numbers for your build. - `-var-file` - Set template variables from a file. diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 8839830d2..435bec7c4 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -11,9 +11,15 @@ page_title: 'Atlas Post-Processor' Type: `atlas` -The Atlas post-processor uploads artifacts from your packer builds to Atlas for hosting. Artifacts hosted in Atlas are are automatically made available for use with Vagrant and Terraform, and Atlas provides additional features for managing versions and releases. [Learn more about packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) +The Atlas post-processor uploads artifacts from your packer builds to Atlas for +hosting. 
Artifacts hosted in Atlas are are automatically made available for use +with Vagrant and Terraform, and Atlas provides additional features for managing +versions and releases. [Learn more about packer in +Atlas.](https://atlas.hashicorp.com/help/packer/features) -You can also use the push command to [run packer builds in Atlas](/docs/command-line/push.html). The push command and Atlas post-processor can be used together or independently. +You can also use the push command to [run packer builds in +Atlas](/docs/command-line/push.html). The push command and Atlas post-processor +can be used together or independently. ## Workflow @@ -25,7 +31,8 @@ location in Atlas. Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) +1. Packer builds an AMI with the [Amazon AMI + builder](/docs/builders/amazon.html) 2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the @@ -41,12 +48,15 @@ The configuration allows you to specify and access the artifact in Atlas. - `token` (string) - Your access token for the Atlas API. --> Login to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `token` configuration option. +-> Login to Atlas to [generate an Atlas +Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to +configure your token is to set it to the `ATLAS_TOKEN` environment variable, but +you can also use `token` configuration option. - `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. 
- You must have access to the organization—hashicorp in this example—in - order to add an artifact to the organization in Atlas. + You must have access to the organization—hashicorp in this example—in order + to add an artifact to the organization in Atlas. - `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. This field must be defined because Atlas can host From 64604ee955bcef6b05f0366cb79ecea296e72ec1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 18:41:42 -0700 Subject: [PATCH 027/100] More succinct phrasing for cracklib conflict --- .../intro/getting-started/setup.html.markdown | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index 5e4734e08..ba7d95cf0 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -58,34 +58,6 @@ If you get an error that `packer` could not be found, then your PATH environment variable was not setup properly. Please go back and ensure that your PATH variable contains the directory which has Packer installed. -The `packer` binary may conflict with the cracklib-supplied packer binary -on RPM-based systems like Fedora, RHEL or CentOS. If this happens, running -`packer` will result in no output or something like this: - -```text -$ packer -/usr/share/cracklib/pw_dict.pwd: Permission denied -/usr/share/cracklib/pw_dict: Permission denied -``` - -In this case you may wish to symlink the `packer` binary to `packer.io` -and use that instead. e.g. - -```text -ln -s /usr/local/bin/packer /usr/local/bin/packer.io -``` - -Then replace `packer` with `packer.io` when following the rest of the -documentation. 
- -Alternatively you could change your `$PATH` so that the right packer -binary is selected first, however this may cause issues when attempting -to change passwords in the future. - -```text -export PATH="/path/to/packer/directory:$PATH" -``` - Otherwise, Packer is installed and you're ready to go! ## Alternative Installation Methods @@ -97,6 +69,14 @@ are alternatives available. If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: -```text -$ brew install packer -``` + $ brew install packer + +## Troubleshooting + +On some RedHat-based Linux distributions there is another tool named `packer` installed by default. You can check for this using `which -a packer`. If you get an error like this it indicates there is a name conflict. + + $ packer + /usr/share/cracklib/pw_dict.pwd: Permission denied + /usr/share/cracklib/pw_dict: Permission denied + +To fix this, you can create a symlink to packer that uses a different name like `packer.io`, or invoke the `packer` binary you want using its absolute path, e.g. `/usr/local/packer`. From b533a4b833d87359d9c436796f7b34850465ca79 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 18:55:36 -0700 Subject: [PATCH 028/100] Added a note on permissions required for IAM roles. Thanks @bmatsuo --- .../source/docs/builders/amazon.html.markdown | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index ad336ad1c..8d6c07543 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -70,3 +70,24 @@ The following policy document provides the minimal set permissions necessary for }] } ``` + +## Troubleshooting + +### Attaching IAM Policies to Roles + +IAM policies can be associated with user or roles. 
If you use packer with IAM roles, you may encounter an error like this one: + + ==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation. + +You can read more about why this happens on the [Amazon Security Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). The example policy below may help packer work with IAM roles. Note that this example provides more than the minimal set of permissions needed for packer to work, but specifics will depend on your use-case. + +```json +{ + "Sid": "PackerIAMPassRole", + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": [ + "*" + ] +} +``` From c3e39c2f0d5f0a8c3486c68456c6ad0ce062c7f6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:38:14 -0700 Subject: [PATCH 029/100] Updated docs on how AWS credentials are resolved --- .../docs/builders/amazon-chroot.html.markdown | 7 ++--- .../docs/builders/amazon-ebs.html.markdown | 8 ++--- .../builders/amazon-instance.html.markdown | 9 ++---- .../source/docs/builders/amazon.html.markdown | 29 +++++++++++++++++++ 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index b3d1644dd..7e1a23ccb 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -57,10 +57,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. 
- Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -68,7 +65,7 @@ can be configured for this builder. [configuration templates](/docs/templates/configuration-templates.html) for more info) * `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`. + [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index fc78901a6..6413899c8 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -37,10 +37,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. - Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. 
[Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -53,8 +50,7 @@ can be configured for this builder. * `region` (string) - The name of the region, such as "us-east-1", in which to launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` +* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 81e425c9a..565d77594 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -42,10 +42,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. - Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `account_id` (string) - Your AWS account ID. This is required for bundling the AMI. 
This is _not the same_ as the access key. You can find your @@ -65,9 +62,7 @@ can be configured for this builder. * `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` - +* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 783018d95..736f61068 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -31,6 +31,35 @@ AMI. Packer supports the following builders at the moment: [amazon-ebs builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon generally recommends EBS-backed images nowadays. +
    ## Specifying Amazon Credentials
    + +When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: + + access key id: AKIAIOSFODNN7EXAMPLE + secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + +If you use other AWS tools you may already have these configured. If so, packer will try to use them, *unless* they are specified in your packer template. Credentials are resolved in the following order: + +1. Values hard-coded in the packer template are always authoritative. +2. *Variables* in the packer template may be resolved from command-line flags or from environment variables. Please read about [User Variables](https://packer.io/docs/templates/user-variables.html) for details. +3. If no credentials are found, packer falls back to automatic lookup. + +### Automatic Lookup + +If no AWS credentials are found in a packer template, we proceed on to the following steps: + +1. Lookup via environment variables. + - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` + - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` +2. Look for [local AWS configuration files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + - First `~/.aws/credentials` + - Next based on `AWS_PROFILE` +3. Lookup an IAM role for the current EC2 instance (if you're running in EC2) + +~> **Subtle details of automatic lookup may change over time.** The most reliable way to specify your configuration is by setting them in template variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. + +Environment variables provide the best portability, allowing you to run your packer build on your workstation, in Atlas, or on another build server. 
+ ## Using an IAM Instance Profile If AWS keys are not specified in the template, Packer will consult the [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, try the standard AWS environment variables, and then From 3fe2d2f5bc49406bb5d8a68134a2a05208f1902a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:55:08 -0700 Subject: [PATCH 030/100] Reformat --- .../docs/builders/amazon-chroot.html.markdown | 21 ++-- .../docs/builders/amazon-ebs.html.markdown | 30 +++--- .../builders/amazon-instance.html.markdown | 53 +++++----- .../source/docs/builders/amazon.html.markdown | 97 ++++++++++++------- .../intro/getting-started/setup.html.markdown | 8 +- 5 files changed, 121 insertions(+), 88 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index fb824d488..8cc633caa 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -60,19 +60,20 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. 
To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `secret_key` (string) - The secret key used to communicate with AWS. - [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The source AMI whose root volume will be copied - and provisioned on the currently running instance. This must be an - EBS-backed AMI with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed + AMI with a root volume snapshot that you have access to. ### Optional: diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index e89f525e6..f97404d19 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -40,26 +40,28 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. 
To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. ### Optional: diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index cec98a5c0..13ab1f293 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -45,41 +45,44 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. 
[Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is _not the same_ as the access key. You can find your - account ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. - This bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. 
[Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. -* `x509_cert_path` (string) - The local path to a valid X509 certificate for - your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page - in the AWS console. +- `x509_cert_path` (string) - The local path to a valid X509 certificate for + your AWS account. This is used for bundling the AMI. This X509 certificate + must be registered with your account from the security credentials page in + the AWS console. -* `x509_key_path` (string) - The local path to the private key for the X509 - certificate specified by `x509_cert_path`. This is used for bundling the AMI. +- `x509_key_path` (string) - The local path to the private key for the X509 + certificate specified by `x509_cert_path`. This is used for bundling + the AMI. ### Optional: diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index c81e463ec..3eb79ac1e 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -9,57 +9,75 @@ page_title: Amazon AMI Builder # Amazon AMI Builder Packer is able to create Amazon AMIs. To achieve this, Packer comes with -multiple builders depending on the strategy you want to use to build the -AMI. 
Packer supports the following builders at the moment: +multiple builders depending on the strategy you want to use to build the AMI. +Packer supports the following builders at the moment: -* [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs - by launching a source AMI and re-packaging it into a new AMI after - provisioning. If in doubt, use this builder, which is the easiest to get - started with. +- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by + launching a source AMI and re-packaging it into a new AMI + after provisioning. If in doubt, use this builder, which is the easiest to + get started with. -* [amazon-instance](/docs/builders/amazon-instance.html) - Create - instance-store AMIs by launching and provisioning a source instance, then - rebundling it and uploading it to S3. +- [amazon-instance](/docs/builders/amazon-instance.html) - Create + instance-store AMIs by launching and provisioning a source instance, then + rebundling it and uploading it to S3. -* [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs - from an existing EC2 instance by mounting the root device and using a - [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision - that device. This is an **advanced builder and should not be used by - newcomers**. However, it is also the fastest way to build an EBS-backed - AMI since no new EC2 instance needs to be launched. +- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs + from an existing EC2 instance by mounting the root device and using a + [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision + that device. This is an **advanced builder and should not be used by + newcomers**. However, it is also the fastest way to build an EBS-backed AMI + since no new EC2 instance needs to be launched. --> **Don't know which builder to use?** If in doubt, use the -[amazon-ebs builder](/docs/builders/amazon-ebs.html). 
It is -much easier to use and Amazon generally recommends EBS-backed images nowadays. +-> **Don't know which builder to use?** If in doubt, use the [amazon-ebs +builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon +generally recommends EBS-backed images nowadays. -
    ## Specifying Amazon Credentials
    +
    -When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: +\#\# Specifying Amazon Credentials + +
    + +When you use any of the amazon builders, you must provide credentials to the API +in the form of an access key id and secret. These look like: access key id: AKIAIOSFODNN7EXAMPLE secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY -If you use other AWS tools you may already have these configured. If so, packer will try to use them, *unless* they are specified in your packer template. Credentials are resolved in the following order: +If you use other AWS tools you may already have these configured. If so, packer +will try to use them, *unless* they are specified in your packer template. +Credentials are resolved in the following order: -1. Values hard-coded in the packer template are always authoritative. -2. *Variables* in the packer template may be resolved from command-line flags or from environment variables. Please read about [User Variables](https://packer.io/docs/templates/user-variables.html) for details. -3. If no credentials are found, packer falls back to automatic lookup. +1. Values hard-coded in the packer template are always authoritative. +2. *Variables* in the packer template may be resolved from command-line flags + or from environment variables. Please read about [User + Variables](https://packer.io/docs/templates/user-variables.html) + for details. +3. If no credentials are found, packer falls back to automatic lookup. ### Automatic Lookup -If no AWS credentials are found in a packer template, we proceed on to the following steps: +If no AWS credentials are found in a packer template, we proceed on to the +following steps: -1. Lookup via environment variables. - - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` - - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` -2. Look for [local AWS configuration files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - - First `~/.aws/credentials` - - Next based on `AWS_PROFILE` -3. 
Lookup an IAM role for the current EC2 instance (if you're running in EC2) +1. Lookup via environment variables. + - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` + - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` -~> **Subtle details of automatic lookup may change over time.** The most reliable way to specify your configuration is by setting them in template variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. +2. Look for [local AWS configuration + files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + - First `~/.aws/credentials` + - Next based on `AWS_PROFILE` -Environment variables provide the best portability, allowing you to run your packer build on your workstation, in Atlas, or on another build server. +3. Lookup an IAM role for the current EC2 instance (if you're running in EC2) + +\~> **Subtle details of automatic lookup may change over time.** The most +reliable way to specify your configuration is by setting them in template +variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and +`AWS_SECRET_ACCESS_KEY` environment variables. + +Environment variables provide the best portability, allowing you to run your +packer build on your workstation, in Atlas, or on another build server. ## Using an IAM Instance Profile @@ -108,13 +126,18 @@ Packer to work: ### Attaching IAM Policies to Roles -IAM policies can be associated with user or roles. If you use packer with IAM roles, you may encounter an error like this one: +IAM policies can be associated with user or roles. If you use packer with IAM +roles, you may encounter an error like this one: ==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation. 
-You can read more about why this happens on the [Amazon Security Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). The example policy below may help packer work with IAM roles. Note that this example provides more than the minimal set of permissions needed for packer to work, but specifics will depend on your use-case. +You can read more about why this happens on the [Amazon Security +Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). +The example policy below may help packer work with IAM roles. Note that this +example provides more than the minimal set of permissions needed for packer to +work, but specifics will depend on your use-case. -```json +``` {.json} { "Sid": "PackerIAMPassRole", "Effect": "Allow", diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index 2151019c4..181f93edb 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -77,10 +77,14 @@ If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: ## Troubleshooting -On some RedHat-based Linux distributions there is another tool named `packer` installed by default. You can check for this using `which -a packer`. If you get an error like this it indicates there is a name conflict. +On some RedHat-based Linux distributions there is another tool named `packer` +installed by default. You can check for this using `which -a packer`. If you get +an error like this it indicates there is a name conflict. 
$ packer /usr/share/cracklib/pw_dict.pwd: Permission denied /usr/share/cracklib/pw_dict: Permission denied -To fix this, you can create a symlink to packer that uses a different name like `packer.io`, or invoke the `packer` binary you want using its absolute path, e.g. `/usr/local/packer`. +To fix this, you can create a symlink to packer that uses a different name like +`packer.io`, or invoke the `packer` binary you want using its absolute path, +e.g. `/usr/local/packer`. From 54afe10ad10f8fc841d1f4f46a4a5feeca977d7c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:59:40 -0700 Subject: [PATCH 031/100] Make the anchor work with the reformatter --- website/source/docs/builders/amazon.html.markdown | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 3eb79ac1e..a85e22d1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -32,11 +32,9 @@ Packer supports the following builders at the moment: builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon generally recommends EBS-backed images nowadays. -
    + -\#\# Specifying Amazon Credentials - -
    +## Specifying Amazon Credentials When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: From e0be4efefef5ce6037d77ad996051b34db3753b6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 21:00:24 -0700 Subject: [PATCH 032/100] Make the anchor work with reformat --- website/source/docs/builders/amazon.html.markdown | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 3eb79ac1e..a85e22d1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -32,11 +32,9 @@ Packer supports the following builders at the moment: builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon generally recommends EBS-backed images nowadays. -
    + -\#\# Specifying Amazon Credentials - -
    +## Specifying Amazon Credentials When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: From 88ebc2f7e8a175c292db122be7e67602b0afca64 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Tue, 14 Jul 2015 15:19:24 +1200 Subject: [PATCH 033/100] Add s.SSHPort variable as the port WinRM uses to connect. This is needed on any builder where the port used to connect is not the guest winrm port but a nated port on the host. Similar behavior is used by the SSH communicator. --- helper/communicator/step_connect.go | 1 + helper/communicator/step_connect_winrm.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go index 0c1522330..e72be3ba8 100644 --- a/helper/communicator/step_connect.go +++ b/helper/communicator/step_connect.go @@ -53,6 +53,7 @@ func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { Config: s.Config, Host: s.Host, WinRMConfig: s.WinRMConfig, + WinRMPort: s.SSHPort, }, } for k, v := range s.CustomConnect { diff --git a/helper/communicator/step_connect_winrm.go b/helper/communicator/step_connect_winrm.go index bdd0c1499..44244b37a 100644 --- a/helper/communicator/step_connect_winrm.go +++ b/helper/communicator/step_connect_winrm.go @@ -25,6 +25,7 @@ type StepConnectWinRM struct { Config *Config Host func(multistep.StateBag) (string, error) WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + WinRMPort func(multistep.StateBag) (int, error) } func (s *StepConnectWinRM) Run(state multistep.StateBag) multistep.StepAction { @@ -96,6 +97,13 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan continue } port := s.Config.WinRMPort + if s.WinRMPort != nil { + port, err = s.WinRMPort(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM port: %s", err) + continue + } + } user := s.Config.WinRMUser password := s.Config.WinRMPassword 
From ef873ba210efde9e92c11fc89c712bd632de7bf8 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sun, 26 Jul 2015 16:33:56 -0700 Subject: [PATCH 034/100] Update version file so builds from master don't masquerade as 0.8.2 --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index b858802e6..84958092f 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.2" +const Version = "0.8.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From e7900ec5ef9145ff52d800b0d0c0d9395a071978 Mon Sep 17 00:00:00 2001 From: Brian Fletcher Date: Mon, 27 Jul 2015 21:00:11 +0100 Subject: [PATCH 035/100] Fix example code for digital ocean builder Remove command after size --- .../source/intro/getting-started/parallel-builds.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 626033ef2..57b689d7e 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -67,7 +67,7 @@ array. 
"api_token": "{{user `do_api_token`}}", "image": "ubuntu-14-04-x64", "region": "nyc3", - "size": "512mb", + "size": "512mb" } ``` From ce54dba2d3b62418fe8c23180f236241a78cac75 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:00:06 +0100 Subject: [PATCH 036/100] openstack builder: log which IP address SSH will use --- builder/openstack/ssh.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 3e7350d11..87a219b22 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -23,6 +23,7 @@ func CommHost( // If we have a specific interface, try that if sshinterface != "" { if addr := sshAddrFromPool(s, sshinterface); addr != "" { + log.Printf("[DEBUG] Using IP address %s from specified interface %s for SSH", addr, sshinterface) return addr, nil } } @@ -30,15 +31,18 @@ func CommHost( // If we have a floating IP, use that ip := state.Get("access_ip").(*floatingip.FloatingIP) if ip != nil && ip.IP != "" { + log.Printf("[DEBUG] Using floating IP %s for SSH", ip.IP) return ip.IP, nil } if s.AccessIPv4 != "" { + log.Printf("[DEBUG] Using AccessIPv4 %s for SSH", s.AccessIPv4) return s.AccessIPv4, nil } // Try to get it from the requested interface if addr := sshAddrFromPool(s, sshinterface); addr != "" { + log.Printf("[DEBUG] Using IP address %s for SSH", addr) return addr, nil } From b47eb4cea90a0e39b1d4ff38a5da232401304380 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:05:51 +0100 Subject: [PATCH 037/100] openstack builder: support using existing keypair --- builder/openstack/builder.go | 2 ++ builder/openstack/run_config.go | 1 + builder/openstack/step_key_pair.go | 23 +++++++++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index d6b528695..d15713339 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -77,6 +77,8 @@ func (b *Builder) Run(ui packer.Ui, hook 
packer.Hook, cache packer.Cache) (packe &StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &StepRunSourceServer{ Name: b.config.ImageName, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 128e36b5b..00f34c9c4 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -11,6 +11,7 @@ import ( // image and details on how to access that launched image. type RunConfig struct { Comm communicator.Config `mapstructure:",squash"` + SSHKeyPairName string `mapstructure:"ssh_keypair_name"` SSHInterface string `mapstructure:"ssh_interface"` SourceImage string `mapstructure:"source_image"` diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go index 06bcbf9ea..97dbf7515 100644 --- a/builder/openstack/step_key_pair.go +++ b/builder/openstack/step_key_pair.go @@ -2,6 +2,7 @@ package openstack import ( "fmt" + "io/ioutil" "os" "runtime" @@ -14,10 +15,27 @@ import ( type StepKeyPair struct { Debug bool DebugKeyPath string + KeyPairName string + PrivateKeyFile string + keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { + if s.PrivateKeyFile != "" { + privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) + if err != nil { + state.Put("error", fmt.Errorf( + "Error loading configured private key file: %s", err)) + return multistep.ActionHalt + } + + state.Put("keyPair", s.KeyPairName) + state.Put("privateKey", string(privateKeyBytes)) + + return multistep.ActionContinue + } + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) @@ -81,6 +99,11 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { } func (s *StepKeyPair) Cleanup(state multistep.StateBag) { + // If we used an SSH private key file, do not go about deleting + // keypairs + if 
s.PrivateKeyFile != "" { + return + } // If no key name is set, then we never created it, so just return if s.keyName == "" { return From a7da0ffde1550b249ac059f10e4b67ce6854b6f1 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:07:25 +0100 Subject: [PATCH 038/100] openstack: store updated accessIPv4 from RackConnect --- builder/openstack/step_wait_for_rackconnect.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/openstack/step_wait_for_rackconnect.go b/builder/openstack/step_wait_for_rackconnect.go index 6263bd17d..7ab42a8f4 100644 --- a/builder/openstack/step_wait_for_rackconnect.go +++ b/builder/openstack/step_wait_for_rackconnect.go @@ -39,6 +39,7 @@ func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAct } if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" { + state.Put("server", server) break } From 715662f60b2ae8f251766c92f7d607539cdfb650 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 27 Jul 2015 16:42:06 -0700 Subject: [PATCH 039/100] Reformat --- builder/docker/communicator.go | 4 ++-- builder/docker/step_connect_docker.go | 2 +- builder/openstack/builder.go | 8 ++++---- builder/openstack/run_config.go | 4 ++-- builder/openstack/step_key_pair.go | 10 +++++----- provisioner/chef-client/provisioner.go | 8 ++++---- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 63ef4cd5b..4fcd9b658 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -24,8 +24,8 @@ type Communicator struct { HostDir string ContainerDir string Version *version.Version - Config *Config - lock sync.Mutex + Config *Config + lock sync.Mutex } func (c *Communicator) Start(remote *packer.RemoteCmd) error { diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index 315cfc204..f84d369c2 100644 --- a/builder/docker/step_connect_docker.go +++ 
b/builder/docker/step_connect_docker.go @@ -26,7 +26,7 @@ func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { HostDir: tempDir, ContainerDir: "/packer-files", Version: version, - Config: config, + Config: config, } state.Put("communicator", comm) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index d15713339..9f4c9e7bc 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -75,10 +75,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Flavor: b.config.Flavor, }, &StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &StepRunSourceServer{ Name: b.config.ImageName, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 00f34c9c4..a8b8638dc 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -10,9 +10,9 @@ import ( // RunConfig contains configuration for running an instance from a source // image and details on how to access that launched image. 
type RunConfig struct { - Comm communicator.Config `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` SSHKeyPairName string `mapstructure:"ssh_keypair_name"` - SSHInterface string `mapstructure:"ssh_interface"` + SSHInterface string `mapstructure:"ssh_interface"` SourceImage string `mapstructure:"source_image"` Flavor string `mapstructure:"flavor"` diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go index 97dbf7515..f17d76f35 100644 --- a/builder/openstack/step_key_pair.go +++ b/builder/openstack/step_key_pair.go @@ -13,12 +13,12 @@ import ( ) type StepKeyPair struct { - Debug bool - DebugKeyPath string - KeyPairName string - PrivateKeyFile string + Debug bool + DebugKeyPath string + KeyPairName string + PrivateKeyFile string - keyName string + keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 498033925..62b3732de 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -287,10 +287,10 @@ func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, ctx := p.config.ctx ctx.Data = &ConfigTemplate{ - NodeName: nodeName, - ServerUrl: serverUrl, - ClientKey: clientKey, - SslVerifyMode: sslVerifyMode, + NodeName: nodeName, + ServerUrl: serverUrl, + ClientKey: clientKey, + SslVerifyMode: sslVerifyMode, } configString, err := interpolate.Render(tpl, &ctx) if err != nil { From 73a157b78d56a50478d4bc5d80d06f700274711e Mon Sep 17 00:00:00 2001 From: Patrick Lucas Date: Mon, 27 Jul 2015 19:32:21 -0700 Subject: [PATCH 040/100] builder/googlecompute: Document use_internal_ip --- website/source/docs/builders/googlecompute.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/builders/googlecompute.markdown b/website/source/docs/builders/googlecompute.markdown index a572a0371..7e6df9823 100644 
--- a/website/source/docs/builders/googlecompute.markdown +++ b/website/source/docs/builders/googlecompute.markdown @@ -129,6 +129,9 @@ can be configured for this builder. * `tags` (array of strings) +* `use_internal_ip` (boolean) - If true, use the instance's internal IP instead + of its external IP during building. + ## Gotchas Centos images have root ssh access disabled by default. Set `ssh_username` to any user, which will be created by packer with sudo access. From 21107b0027e431116a7f80af10642a8c5e84d1c8 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Mon, 27 Jul 2015 12:43:05 +1200 Subject: [PATCH 041/100] Fix wrong command type being used when running elevated provisioner. --- provisioner/powershell/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 31ba2b34a..a862ef9b3 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -399,7 +399,7 @@ func (p *Provisioner) createCommandText() (command string, err error) { Vars: flattenedEnvVars, Path: p.config.RemotePath, } - command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + command, err = interpolate.Render(p.config.ElevatedExecuteCommand, &p.config.ctx) if err != nil { return "", fmt.Errorf("Error processing command: %s", err) } From f90f2f685d281d290fa5ae36f602cdb6be8fb759 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 07:00:57 -0400 Subject: [PATCH 042/100] Fix semantic errors in messages --- provisioner/salt-masterless/provisioner.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..439f0e590 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -116,9 +116,9 @@ func (p *Provisioner) Provision(ui packer.Ui, comm 
packer.Communicator) error { } } - ui.Message(fmt.Sprintf("Creating remote directory: %s", p.config.TempConfigDir)) + ui.Message(fmt.Sprintf("Creating remote temporary directory: %s", p.config.TempConfigDir)) if err := p.createDir(ui, comm, p.config.TempConfigDir); err != nil { - return fmt.Errorf("Error creating remote salt state directory: %s", err) + return fmt.Errorf("Error creating remote temporary directory: %s", err) } if p.config.MinionConfig != "" { @@ -216,7 +216,7 @@ func (p *Provisioner) moveFile(ui packer.Ui, comm packer.Communicator, dst, src err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) } - return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err) } return nil } From b88afbf3c9306fd7a82d49727e8a4008b1b41ec4 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Tue, 28 Jul 2015 01:38:40 -0400 Subject: [PATCH 043/100] Revise documentation for minion config * Update link to salt minion config. * Clarify that minion config is a file. --- website/source/docs/provisioners/salt-masterless.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 84171a071..679a0f6eb 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -47,7 +47,7 @@ Optional: This will be uploaded to the `/srv/salt` on the remote. - `minion_config` (string) - The path to your local [minion - config](http://docs.saltstack.com/topics/configuration.html). This will be + config file](http://docs.saltstack.com/ref/configuration/minion.html). This will be uploaded to the `/etc/salt` on the remote. 
- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt From 769c82b1710dc9665f6cfda64baee78c714ec1a7 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 06:36:39 -0400 Subject: [PATCH 044/100] Support for setting salt remote directory * It is possible to set remote salt tree through `remote_state_tree` argument. * It is possible to set remote pillar root through `remote_pillar_roots` argument. * Directories `remote_state_tree` and `remote_pillar_roots` are emptied before use. --- provisioner/salt-masterless/provisioner.go | 50 +++++++++++++++++++--- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..f308009fb 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -15,6 +15,8 @@ import ( ) const DefaultTempConfigDir = "/tmp/salt" +const DefaultStateTreeDir = "/srv/salt" +const DefaultPillarRootDir = "/srv/pillar" type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -34,6 +36,12 @@ type Config struct { // Local path to the salt pillar roots LocalPillarRoots string `mapstructure:"local_pillar_roots"` + // Remote path to the salt state tree + RemoteStateTree string `mapstructure:"remote_state_tree"` + + // Remote path to the salt pillar roots + RemotePillarRoots string `mapstructure:"remote_pillar_roots"` + // Where files will be copied before moving to the /srv/salt directory TempConfigDir string `mapstructure:"temp_config_dir"` @@ -60,6 +68,14 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.TempConfigDir = DefaultTempConfigDir } + if p.config.RemoteStateTree == "" { + p.config.RemoteStateTree = DefaultStateTreeDir + } + + if p.config.RemotePillarRoots == "" { + p.config.RemotePillarRoots = DefaultPillarRootDir + } + var errs *packer.MultiError // require a salt state tree @@ -144,11 +160,14 @@ func (p *Provisioner) 
Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error uploading local state tree to remote: %s", err) } - // move state tree into /srv/salt + // move state tree from temporary directory src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "states")) - dst = "/srv/salt" + dst = p.config.RemoteStateTree + if err = p.removeDir(ui, comm, dst); err != nil { + return fmt.Errorf("Unable to clear salt tree: %s", err) + } if err = p.moveFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/states to /srv/salt: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s/states to %s: %s", p.config.TempConfigDir, dst, err) } if p.config.LocalPillarRoots != "" { @@ -159,16 +178,19 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error uploading local pillar roots to remote: %s", err) } - // move pillar tree into /srv/pillar + // move pillar root from temporary directory src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "pillar")) - dst = "/srv/pillar" + dst = p.config.RemotePillarRoots + if err = p.removeDir(ui, comm, dst); err != nil { + return fmt.Errorf("Unable to clear pillat root: %s", err) + } if err = p.moveFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/pillar to /srv/pillar: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s/pillar to %s: %s", p.config.TempConfigDir, dst, err) } } ui.Message("Running highstate") - cmd := &packer.RemoteCmd{Command: p.sudo("salt-call --local state.highstate -l info --retcode-passthrough")} + cmd := &packer.RemoteCmd{Command: fmt.Sprintf(p.sudo("salt-call --local state.highstate --file-root=%s --pillar-root=%s -l info --retcode-passthrough"),p.config.RemoteStateTree, p.config.RemotePillarRoots)} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -235,6 
+257,20 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri return nil } +func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error { + ui.Message(fmt.Sprintf("Removing directory: %s", dir)) + cmd := &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -rf '%s'", dir), + } + if err := cmd.StartWithUi(comm, ui); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status.") + } + return nil +} + func (p *Provisioner) uploadDir(ui packer.Ui, comm packer.Communicator, dst, src string, ignore []string) error { if err := p.createDir(ui, comm, dst); err != nil { return err From 63be0e3ea1e0c2f17915883fff18821a726682f1 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Tue, 28 Jul 2015 01:37:26 -0400 Subject: [PATCH 045/100] Add documentation for salt remote directories --- .../docs/provisioners/salt-masterless.html.markdown | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 84171a071..19242ae44 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -38,13 +38,21 @@ Optional: has more detailed usage instructions. By default, no arguments are sent to the script. +- `remote_pillar_roots` (string) - The path to your remote [pillar + roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). + default: `/srv/pillar`. + +- `remote_state_tree` (string) - The path to your remote [state + tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). + default: `/srv/salt`. + - `local_pillar_roots` (string) - The path to your local [pillar roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). - This will be uploaded to the `/srv/pillar` on the remote. 
+ This will be uploaded to the `remote_pillar_roots` on the remote. - `local_state_tree` (string) - The path to your local [state tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). - This will be uploaded to the `/srv/salt` on the remote. + This will be uploaded to the `remote_state_tree` on the remote. - `minion_config` (string) - The path to your local [minion config](http://docs.saltstack.com/topics/configuration.html). This will be From eba0e9eaf8fe1c43feba1a7a9ff2e9832a277b7c Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 06:28:19 -0400 Subject: [PATCH 046/100] Ensure that `/etc/salt` exists Make sure that directory `/etc/salt` exists before copying salt minion file. --- provisioner/salt-masterless/provisioner.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..573e93861 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -130,6 +130,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } // move minion config into /etc/salt + ui.Message(fmt.Sprintf("Make sure directory %s exists", "/etc/salt")) + if err := p.createDir(ui, comm, "/etc/salt"); err != nil { + return fmt.Errorf("Error creating remote salt configuration directory: %s", err) + } src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "minion")) dst = "/etc/salt/minion" if err = p.moveFile(ui, comm, dst, src); err != nil { From 97e16aeed9ba4d0ba0df18f20085988b3c6d21a7 Mon Sep 17 00:00:00 2001 From: Kevin Fishner Date: Tue, 28 Jul 2015 15:04:19 -0700 Subject: [PATCH 047/100] add updated analytics --- website/source/layouts/adroll.html | 17 +++++++++++++++++ website/source/layouts/layout.erb | 1 + 2 files changed, 18 insertions(+) create mode 100644 website/source/layouts/adroll.html diff --git a/website/source/layouts/adroll.html 
b/website/source/layouts/adroll.html new file mode 100644 index 000000000..bc5b32c40 --- /dev/null +++ b/website/source/layouts/adroll.html @@ -0,0 +1,17 @@ + diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index f66adb067..c809b6b55 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -71,5 +71,6 @@ <%= partial "layouts/google-analytics.html" %> + <%= partial "layouts/adroll.html" %> From 8741a6df2374b97a2baceeaf2c903203eb924982 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 15:29:20 -0700 Subject: [PATCH 048/100] Renamed .markdown to .html.markdown to be consistent with other filenames --- .../{googlecompute.markdown => googlecompute.html.markdown} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename website/source/docs/builders/{googlecompute.markdown => googlecompute.html.markdown} (100%) diff --git a/website/source/docs/builders/googlecompute.markdown b/website/source/docs/builders/googlecompute.html.markdown similarity index 100% rename from website/source/docs/builders/googlecompute.markdown rename to website/source/docs/builders/googlecompute.html.markdown From 0c7654358aa9ded65f103d0a4d805f0e2b9da643 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 15:30:32 -0700 Subject: [PATCH 049/100] Reformat --- .../docs/builders/googlecompute.html.markdown | 148 ++++++++++-------- 1 file changed, 80 insertions(+), 68 deletions(-) diff --git a/website/source/docs/builders/googlecompute.html.markdown b/website/source/docs/builders/googlecompute.html.markdown index 7e6df9823..56fdafdcd 100644 --- a/website/source/docs/builders/googlecompute.html.markdown +++ b/website/source/docs/builders/googlecompute.html.markdown @@ -1,36 +1,44 @@ --- -layout: "docs" -page_title: "Google Compute Builder" -description: |- - The `googlecompute` Packer builder is able to create images for use with Google Compute Engine (GCE) based on existing images. 
Google Compute Engine doesn't allow the creation of images from scratch. ---- +description: | + The `googlecompute` Packer builder is able to create images for use with Google + Compute Engine (GCE) based on existing images. Google Compute Engine doesn't + allow the creation of images from scratch. +layout: docs +page_title: Google Compute Builder +... # Google Compute Builder Type: `googlecompute` -The `googlecompute` Packer builder is able to create [images](https://developers.google.com/compute/docs/images) for use with -[Google Compute Engine](https://cloud.google.com/products/compute-engine)(GCE) based on existing images. Google -Compute Engine doesn't allow the creation of images from scratch. +The `googlecompute` Packer builder is able to create +[images](https://developers.google.com/compute/docs/images) for use with [Google +Compute Engine](https://cloud.google.com/products/compute-engine)(GCE) based on +existing images. Google Compute Engine doesn't allow the creation of images from +scratch. ## Authentication -Authenticating with Google Cloud services requires at most one JSON file, -called the _account file_. The _account file_ is **not** required if you are running -the `googlecompute` Packer builder from a GCE instance with a properly-configured -[Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). +Authenticating with Google Cloud services requires at most one JSON file, called +the *account file*. The *account file* is **not** required if you are running +the `googlecompute` Packer builder from a GCE instance with a +properly-configured [Compute Engine Service +Account](https://cloud.google.com/compute/docs/authentication). ### Running With a Compute Engine Service Account -If you run the `googlecompute` Packer builder from a GCE instance, you can configure that -instance to use a [Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). 
This will allow Packer to authenticate -to Google Cloud without having to bake in a separate credential/authentication file. -To create a GCE instance that uses a service account, provide the required scopes when -launching the instance. +If you run the `googlecompute` Packer builder from a GCE instance, you can +configure that instance to use a [Compute Engine Service +Account](https://cloud.google.com/compute/docs/authentication). This will allow +Packer to authenticate to Google Cloud without having to bake in a separate +credential/authentication file. + +To create a GCE instance that uses a service account, provide the required +scopes when launching the instance. For `gcloud`, do this via the `--scopes` parameter: -```sh +``` {.sh} gcloud compute --project YOUR_PROJECT instances create "INSTANCE-NAME" ... \ --scopes "https://www.googleapis.com/auth/compute" \ "https://www.googleapis.com/auth/devstorage.full_control" \ @@ -39,38 +47,39 @@ gcloud compute --project YOUR_PROJECT instances create "INSTANCE-NAME" ... \ For the [Google Developers Console](https://console.developers.google.com): -1. Choose "Show advanced options" -2. Tick "Enable Compute Engine service account" -3. Choose "Read Write" for Compute -4. Chose "Full" for "Storage" +1. Choose "Show advanced options" +2. Tick "Enable Compute Engine service account" +3. Choose "Read Write" for Compute +4. Chose "Full" for "Storage" **The service account will be used automatically by Packer as long as there is -no _account file_ specified in the Packer configuration file.** +no *account file* specified in the Packer configuration file.** ### Running Without a Compute Engine Service Account -The [Google Developers Console](https://console.developers.google.com) allows you to -create and download a credential file that will let you use the `googlecompute` Packer -builder anywhere. To make -the process more straightforwarded, it is documented here. 
+The [Google Developers Console](https://console.developers.google.com) allows +you to create and download a credential file that will let you use the +`googlecompute` Packer builder anywhere. To make the process more +straightforwarded, it is documented here. -1. Log into the [Google Developers Console](https://console.developers.google.com) - and select a project. +1. Log into the [Google Developers + Console](https://console.developers.google.com) and select a project. -2. Under the "APIs & Auth" section, click "Credentials." +2. Under the "APIs & Auth" section, click "Credentials." -3. Click the "Create new Client ID" button, select "Service account", and click "Create Client ID" +3. Click the "Create new Client ID" button, select "Service account", and click + "Create Client ID" -4. Click "Generate new JSON key" for the Service Account you just created. A JSON file will be downloaded automatically. This is your - _account file_. +4. Click "Generate new JSON key" for the Service Account you just created. A + JSON file will be downloaded automatically. This is your *account file*. ## Basic Example -Below is a fully functioning example. It doesn't do anything useful, -since no provisioners are defined, but it will effectively repackage an -existing GCE image. The account file is obtained in the previous section. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will effectively repackage an existing GCE +image. The account file is obtained in the previous section. -```javascript +``` {.javascript} { "type": "googlecompute", "account_file": "account.json", @@ -82,58 +91,61 @@ existing GCE image. The account file is obtained in the previous section. ## Configuration Reference -Configuration options are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. 
+Configuration options are organized below into two categories: required and +optional. Within each category, the available options are alphabetized and +described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `project_id` (string) - The project ID that will be used to launch instances - and store images. +- `project_id` (string) - The project ID that will be used to launch instances + and store images. -* `source_image` (string) - The source image to use to create the new image - from. Example: `"debian-7-wheezy-v20150127"` +- `source_image` (string) - The source image to use to create the new + image from. Example: `"debian-7-wheezy-v20150127"` -* `zone` (string) - The zone in which to launch the instance used to create - the image. Example: `"us-central1-a"` +- `zone` (string) - The zone in which to launch the instance used to create + the image. Example: `"us-central1-a"` ### Optional: -* `account_file` (string) - The JSON file containing your account credentials. - Not required if you run Packer on a GCE instance with a service account. - Instructions for creating file or using service accounts are above. +- `account_file` (string) - The JSON file containing your account credentials. + Not required if you run Packer on a GCE instance with a service account. + Instructions for creating file or using service accounts are above. -* `disk_size` (integer) - The size of the disk in GB. - This defaults to `10`, which is 10GB. +- `disk_size` (integer) - The size of the disk in GB. This defaults to `10`, + which is 10GB. -* `image_name` (string) - The unique name of the resulting image. - Defaults to `"packer-{{timestamp}}"`. +- `image_name` (string) - The unique name of the resulting image. Defaults to + `"packer-{{timestamp}}"`. 
-* `image_description` (string) - The description of the resulting image. +- `image_description` (string) - The description of the resulting image. -* `instance_name` (string) - A name to give the launched instance. Beware - that this must be unique. Defaults to `"packer-{{uuid}}"`. +- `instance_name` (string) - A name to give the launched instance. Beware that + this must be unique. Defaults to `"packer-{{uuid}}"`. -* `machine_type` (string) - The machine type. Defaults to `"n1-standard-1"`. +- `machine_type` (string) - The machine type. Defaults to `"n1-standard-1"`. -* `metadata` (object of key/value strings) +- `metadata` (object of key/value strings) -* `network` (string) - The Google Compute network to use for the launched - instance. Defaults to `"default"`. +- `network` (string) - The Google Compute network to use for the + launched instance. Defaults to `"default"`. -* `state_timeout` (string) - The time to wait for instance state changes. - Defaults to `"5m"`. +- `state_timeout` (string) - The time to wait for instance state changes. + Defaults to `"5m"`. -* `tags` (array of strings) +- `tags` (array of strings) -* `use_internal_ip` (boolean) - If true, use the instance's internal IP instead - of its external IP during building. +- `use_internal_ip` (boolean) - If true, use the instance's internal IP + instead of its external IP during building. ## Gotchas -Centos images have root ssh access disabled by default. Set `ssh_username` to any user, which will be created by packer with sudo access. +Centos images have root ssh access disabled by default. Set `ssh_username` to +any user, which will be created by packer with sudo access. -The machine type must have a scratch disk, which means you can't use an `f1-micro` or `g1-small` to build images. +The machine type must have a scratch disk, which means you can't use an +`f1-micro` or `g1-small` to build images. 
From 1420e2494cf7014c31df6c4914447c21ae1cea1d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 16:24:43 -0700 Subject: [PATCH 050/100] Added note on 5g upload limit, and workaround --- .../docs/command-line/push.html.markdown | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 06e5a3c98..140c996d3 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -67,3 +67,27 @@ Push a Packer template with a custom token: ``` {.shell} $ packer push -token ABCD1234 template.json ``` + +## Limits + +`push` is limited to 5gb upload when pushing to Atlas. To be clear, packer *can* +build artifacts larger than 5gb, and Atlas *can* store artifacts larger than +5gb. However, the initial payload you push to *start* the build cannot exceed +5gb. If your boot ISO is larger than 5gb (for example if you are building OSX +images), you will need to put your boot ISO in an external web service and +download it during the packer run. + +The easiest way to host these in a secure fashion is to upload your ISO to +[Amazon +S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html) +or [Google Cloud +Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl) and +download it using a signed URL. You can inject the signed URL into your build by +using a build variable (environment variable) in Atlas. Example: + +![Configure your signed URL in the Atlas build variables +menu](/assets/images/packer-signed-urls.png) + +You will also need to [configure your packer +template](http://stormchaser.local:4567/docs/templates/user-variables.html) to +use the variable injected by Atlas (or via `push -var`). 
From 6fac13868b5e5fc47cd98719197f32450c7e3a5d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 16:46:45 -0700 Subject: [PATCH 051/100] Added screenshot for Atlas build variables --- .../source/assets/images/packer-signed-urls.png | Bin 0 -> 40501 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 website/source/assets/images/packer-signed-urls.png diff --git a/website/source/assets/images/packer-signed-urls.png b/website/source/assets/images/packer-signed-urls.png new file mode 100644 index 0000000000000000000000000000000000000000..4e9e17010d7d44c0b4cb21e62ccccd116ea8a4b3 GIT binary patch literal 40501 zcmd42Ra9JC7Viy&KoJ~*yCfmF6Wk#{f(8%n?ry=|-7QFPC%8*+celdb^=`WRoRdzE z@8Ns6V_Zf(6nocRI@erl{^oxNN=u3&zQTP40Re$1_EAU{0^)@+1O%ia2nM(^F5M&p z0r9H9NKjB(Oi++m+S)?j$W#vk;$vWJ9PAf4Bg~$bt1&zpk}d>-rru{eM5li`=Eq^5$o29W`DJx!tJtLcjFFD zyEA`0ayjAzBSYMfs8W3L)Pwklp&KC3xQHz?%7CzN1OZRX_hO6BZ%_=2(cAl>ZMnM# z_9Gpd!)JZ{2e)MpL}gmK)E5x1LoD&xQlo>8ameYceXidN%W8EpGdiV!Q- z>Q|poIk|EDYx4G6t0%^HtIPV`V)_-caA;{RakH&1$K$^5_p6`4_V-60Kcfym`M-_Y z5Q>xnRg6V)7m7)qMVArN(i^7TEX+PCOTNDI8D@I7$gQ7NXNuh7g!#^&37kly@-sfY z?NzdYj7oqm58Wcu7sm9S(|E&=a50u#%fT$evw>QXXDPqJK`G$#4e1tNLI*K5a->MZ zFf1prGzJzoemwuC3=%Si0Rw*37@-&O;MMm(;M?Ckv?H+n`lKaZ_B}m*jpj$5fuH!{ zARkYelce??4BY@++&=O#L|Q9R9X2t(5`^jowekxgc^Heq0XD>iaUT=md!J4O7<6_P zE*Obc1{Qs#ghbUB^foYH^^-gQN(1<;bu43}MTnUxQt2jXogSzWuYJG9DS^*mY=|)r z2usfn)L=%i@pd8+0x^`)hAkBoOUBD{Pq?gMm=92jLgjQIC@o^#B53>wkgO+2*YiRX zzW2pT&37~KeS9xJKpD5fS-xWSK3ie@31jYQZb`%i`Jf4Egjru9OYkBF!O9R*eh2*! 
zqy?8`SQO&tRPN6b(~IQI&JuS*)o7Q4XL-E)XPJ`+zG`5c7%O(fG{ z1Pl9Rv=(mBtG)JMEzShEyEd)kS05nN5e+)(j*))8F7XNP_SH_wQ>4eA^o&sq3Kx4k@EDjk5nTLFa2?J)#&%#V2Zw*DOkU8)N z`AbDCgs`&uM_9Js+QQz_Izw=WJ%(j!exVn^hDCr!_Z4f^)jFkrlR!D3JRm>dHh{84 zh8dirb5T~KL}bd$3fF+78o(W3)>)(FQszAahmVvOz|_vZ>SbyA6^0hMmdF9$0d6`N zuidRZ`gqq7z7dNDhOQO7B5`bY9ek~RPwyT?EI2~)0mTdSwOvI3b(JDB*3zF|D1o#O z?IJ+3lU^Im3cfsMhObfFRoqpIG?H2D@_P0PjO^;QH#I&e`%j0 zS+mrI-Ur;L31n`|etpjngV2X0PP6%HlYKM#wS3=~pf_EybL8^0ljM_B7a~c-#bkpq z>hDu&)JWY_Zo|$K5cowDyVd+?DrIuNZl!!mAMl};pZd!BkyXA%x<>NSZ$AKEQc`h9 zE{rOk%0cKbXn`0*crvSAj(wtdB7356f&@1%S5#*-%J5|sdKK;5&&HI-SXZBW+#Bl~ z_|pg5*J|-YS$QMD+j-l_+ZvcZXtinAXy%&pwPcR6#9r#XRD78x&@7PM?&eDugdi-{ zrQ4+zG#E4-Ga~LME)iQ1gY;fa9#<~+Jtw72%vr3|dtL=kc?5ZZFH=A8W$KCmO2aDeF`nvDv0}DBYKcEm(L-;sGM5bEcy_2OXbJq{p+Jv3?m2jIxZzH53Y7- z2d)~fT#|FrY7%==Xwq<1fQr4!iHb|rTy=N#Wc7=xX_ILa-YJspnH}=o{qdcN#EHrA z(22#|`6AA&yo{c){;9%gmjbC#hf$4UjUOJv5GI-i5vGYF)$IA8_CXgzumjEE*ZakRIqI(c z{8@Axb{mZ|{4<3!2TVUq{9x-~m$=swbp4;>XyPo2Ly8;~{EG-x*QnX_&RB;I(*~ZR;lKSdHn>2zwj$E6gs8OyWH5 zI1XQpar^sDr#;91#L<9dnBC)S-Li0p?}6;jX}f%X?4HUd#{jeDTZV9^<)4XlfeSpTqMzlmMtXgH@P*$M%N0|@x;jO= zr@GwrV9Tjy(XEPpzOY)ycNpRsGAW^QKz4`$cT#O3_HcG#^@Njuc(X5w{n=KGS`2@^ z;jI7KBHA^IGU{!#cT{0?o*bzhuH4Cw@*iMmYcE}X(Il0*q$a#slnbTHN z)uq!)EqV{FFk5~dc$uMAc=K#VW_^pyRkJQOgP3b^8#FDO37fCMt~0oG)hvoE4mO7? 
z$V)7u($^TKP?2ePN0l7qPUD~qSJ;lUPVO$BM$Ea$qOQ!(Hm} zOZ^Vqhw2SI^$vUPD-UU~tH^|xG)H?Ly>1^)DK%h<4URm^R?l86jcdYnrg7zF ziMz_%PlsLFZec%*-_xk=$?o=F3R;qEp>i#~Zoc)N^uG*T2?P_#@k)N)@{qm%ayENt z1CH;d>yT5{u+ord!ErTtIJ|n;vE4ndilEURR)KsSW$DTVaZ~`AA@qVz*!881@etWC z3^RDo0Pdb(_-1>y3^HR1;~P#P=C>l)NO>&foJa%_#sQ4hxVWf$gYP}6Dg%S0EM!yP zb)9dy*AT5>AYAv7)6$I7(jFCj1Kbe!=6l#Jc+eRGo{qK>g_#~)Y-`P6o=~44_yeIW ziet>Dfg^0H(HA9KrO%%@bS%thwRA1C^=KWx*45{b6%zT+8sho>zw`)88Q=_#niD_`f(p?x(_P;&;vZwM1_$x%9BY=p<{K z7BEa!jYtT<(V&1b-#~*%$Vp z8>hOy+j&@Nbl~LFOXofqxxzQ^yjQ#@8#BylxBDfTUdzj@cF}&S)?%3>KHdyc!|fur z>F%(h=HYT)HJ!)JSkL9*V%n_r8{|}j!;y;7c<$%Q_j0*2<_!l5R5bNu%SBmw(_j~9 zp7v%N@X-6hL>7}c7K^2YdfOe@^@PbnMLgG^am6at1(BE!WtweJs@2BwvdyFGL8}jq z^75GUqcN(RNV=crXz^95M;kjhL5dZ>4QHz}8imp}nR->j zVSWGORm&4j>SieGT(AAC9v0_8)%gB`&2V)hlfm56!}1f(S|)9c<*Go#MwG;UqCwgZ z35H3hWtSsvjiq<#uCh%Qf{lqEtg=e4#-(_RsrKDVah>=OIB#cO&M!aKbTc?CIprOc_Ym4I zSoO>%FzDQ06=!8-bu+l~ZA3jCcOaL>Dui&-CM#*Rcr>&x-M&pLcVcss)pC$5a2q07 zH%phFIXHZS{@c{fltI7RlIeJHegjT-@np5$_&9IfPg5)$jQO~~Lye5X*y++v=(K^l z9Jy4cgsWhNXVF-G@N+%vF?Qw@XiSg(K!O%5k*lS)EI$v~P~9QtQKL=ju12MPsG93Z z*W}X77hjSJ+NN{a;4WilA9csomkqbOg=W)g4%OLV9DQldyYCy;!#ImnYt0{RD3IxJ zp%QMbt4~j@RU7QhRHW2O^z?1GmP%zE>XWNVorY#NCK-NTC|&#+#DvwcREl#ZrA=EM z*OELpy7Tt)W`)AIO5zd{h(|-}E(e<(W7{(ZIv_5NEyt!fIt_Iy__PLOp6k};$J;#> zBZ)X_hDL?)JgL%H5uUNAYVX_pU`eMBFmwtNATasCdN^-Qeve<{M1S+uvd6N&TU5B4 z4||llvN8@$LAT4Cc=BA0nTbKFUECGYm!e+y&J!XUb^P%cKBM$g)HGHLPh4L3Y%Z6k9-&OJIJ;sC>h3q2>fcjs z$HW4~CeyupN%!pvi)#v&hVdGW5D@MGqr7rO(NSx#ztE;}exPmGXXJG_{q_PWKaw{Y zIfX23b(=+Mpms`a>Al1tW=Tn6yTPTu#aKnPJ`rue{C8oiLIh5=IHRG}#)%yHpKe5S zA$UDz%T1l`%taG(Y#NIC1^26cE08^8YAc2FY7RUyuWd+l9OcbazD=|y18Oou=HMZJsW)FZUN^WEX@&@HB_sn!!P zP;Pg<4L4iK^9yx0O#yd{NABROr&VN+-F$joiO%p z7lOp`s~+CR^4ky4%fWQbhFEI0G8fJ0Qe2}s^Bd)upt7fpTE((o5zI8y$Bk%nx@_5{Ag`R|l z$Qo}7tU%h)C{67`RKh;*)5DR+l^2SXatypc$lj|vRva{z<^-IlR(T*pTeo#oT_5Qktw+8S{BXo?X_h-W{ ztHM7Cz}Nj6k7Ewk9>?B9GyX*GhT*)Aj&`U4_&!U3?U>NL)*Q_iMaJW_FB@6^DXHr=5I(OmUz`g*6eV)2 
zno21*?FWBISPlHFm%?e6;_-MhRjSc~>%WTHSvdVeE)VyvLz^6d+;tPv!jmDb%I$JC zuiI+Yy>w-m1X?XJJq~l8}t-6j92o`7fkqZMq;q===HVCI2~Qu*e$O3Eanq3xJ*y-+Kb4m zX6ZF;?bh`NR(Zq3%y?YVR8on=&t2D9-5te<)4fKG*17H6_qdClePZ`y&z0ggryG5{ z*YwX?XI6rGxg)mPm606+(y8Y0!E6thyed0#1#`Ne_>EZQRwfT6c>DV64s%~s5!lbP z%~hL#IcGo>n$%ysdfNqk-}!OCc?(Tw+0LcAo{G_7J4tP8f%bH&g%m=*e{G}<^s9xo zz@&mb1SXV+-A8(@PRAYF>kTM(yV{FX# zW*;B!cC=Dm+PEP38oe^cH;pjJg!^5u(^xuDr_oP>^3aCK3G+U6+xe|Lju zy#hA^HMim6qN-oP>r-t_9?QPE71rcS{OyG4b_Yj?m?QWTJR1`Ve=QMUp%5!Vy@YcU znh7D|U4p0bLC*@1bXBrX3*%ZsYX@(y_}|F?kfe27NzjQ2^+n-@hPJ6fLFOlqO3%4M z709$nHLKgmIZ)a@uWTntd()^HAV}@=bkgj0cgez~U-aqY4Ebt8BM9|=vedmRa|zVe z!6|sN1C4!QWy-uk7Zcgf+D94hx;it`-AcAUY#$u)CWR3@ctDpClbU~n@m&N40fP_I zbmDOBvU>wE1LAVBv^vP5OYjxvF3-h;oExLjSKd>yHBj&Lz@6?ehDf zbszLvD1TK?^8~TI|F{ITTVz}F*mlGgGmRKJe1t#kN8C{R*+5oWfo%`0j2}Hdz2-M8 zpMi!~S>zjtHbYsN@pT}iPPDU1B(7sVx`bo*6Q0ynU+p1`b}uJu$F2l_%n*CVs}N=Z z){d{`5rj1xI*$r#3p_r$1@w&(Z1tfPud=A(??)@5nufo;CB*@3)9>f!^C=qj#u*nP zhh;a17CkFeLK74&H%EtM_d7G9w}Dq-PI{vzgPKq_(;}g`o7`rzRsKix7mON=3?z2pLm?U3lU!DSlXtg7YkQ}5~OKruq~llJ(z@;}Chi|^@mo^@>M zdeyLe5p0Sw*P~6^hK>Ou{Q?T#xRLk4&t>8`1CGJbO{aZSOD?{AT7=jVkLv^-N*O-B zEZbbm9#0SI_)#LDr4D4?eXvA%*-N{qcxe#XGx&6JXZ(}brx8b$!#oWN+#r4- z*vweRv%@-Yno7@iZQlg`4@|JgQWQ{eH?DQqW}9;r0dO`wD-Aq)Fj~G~gpTzt@vH!| zxo#@sqWVAzgskwck0|;+k)-k+MVu1A|kMzye`+K0gR=yr*>hh`w6YSE zr~kFs>)0Q-A6-xosHFvZ@dY4J6Z=qzQh34b9kD0Z^vI#Y@6kdrXi!zC>4-YTKlnL0 z`0Tty}G&D5}a`e9L{8%a9UxbS!P%txXQ0M^=z#;WJE6Ee;hzU5DU0mmB4^&QcGXG=+X# z6h6eBL7h6x$IYEqTitbVVq7YMA;0dVxV)I}3MuP-FR^xcW`%LN9^dznQk`Dk^;!4# z7-CP9c#GqF?`=wv298~y?+2&7;@bPlrUdlvuY_A=qmh#5AE0rwULs3nt>*Mq<%?;j9pgwL>iN;Pl<`htzU4BZU2M$#;xy|4ly!UKuB8>N z+e2Ct1=nh6%AxSR&B@Ivf@oMT#zw6l;Xpl|W}17_LEEjbFr15v3Xu*Y*uE%SFFGBZ zU0s}{omd`(A(>IG{f0_hB=kWkK|}FE>?!CsGO!_@u_8b-4YTL?oAdiuh-bJDJ!RwC zT&^>E^Ct)4TLpd0#|UdO#{4Z9e2)1&zXL!bN_$lD&%!A10rb#bt~mJn*gb!bi23HD zB1=8@{x=K$7m4sCBhFO#a9Qx1clq}*FbXX~S|Ui8^Ph9!e@=&%K;5r;)LDXm>)ta) z;gbbei&(i~-ajXif76xMVL;uhGllwpw)Ueqp8(B!%!Ai|cCY;z&)~aEQvcanF}RpF 
zi^=OlzVKLE34;Fr(}Z>M5#Z>?k*-92STvNA5AkP>D4va|(%U*q(gnnS-s(jLhVI_- z%lrQsIyRtRPJ%zY{&PJ3`bE zmJ=`yacnV^AOCUk|E}&-5Evs~eEFb1M|C3<7}ft5J0cxd8ih&w)i4-~u7<^AYNPOD z?05i(89+fCw%eMi9Cnm5%JK>db1CdLrZ=aX-7dd}soy{w(R>Z|2SuXcgbK2<4j#`l zSrRJvy;!Ndz25%7m`0^4-^8kK83b3P-ZZy0m~2q2T1%Utl+13Edt!GqQ=SdfS%fM( z_5SzO^g4*o$*C?g7{Cq1ilypy<|2zZX(mu<%z;%L8o?rJ5Kw7HpygNroHS-GXEY<< zdZ+@$e`W%g&aCauc#Ygo4$%yOiFfA=XY`R}v_#q*Z zLb?ht!Jx~re62sM_H!&yl3dd%hsA%}xo6fJV7*txiy;12lOa(J3!sBeGi3`EiYu(v zyK$thmRydSl6R{|o&H#4%ra2IV42{ab$h^V&h^u^n0mt?_U%?gfMXTa%K`eryK_S1 zew|52H|;;%1i-_~Ub1lP&AiHYX^!oO0G0ag?|R$@YkyEZJ=g9FADwIRKMsQ&je;Zd za*%C#CIpA6NFtuL=A;`>Ran^fs^kqT*DX6{@a*-;`awZqNyW_>STx#nG)wq$*yC}0 z$$5{4>iw?+TOyBpE0^<8Q8yfxSD&ws0d6+m8HkR4so(~vT53qH6-vG)GOPWMZzGQ6 z8};R>F&gi#MQ!tAGimdBS)=<44Ufh6i}t*U>k~|cCzaVBs5I=AG~5$+ z^TQIvm|VWK;=1~#*B|F(;d*tr7@fv6eE>vOXWo4C<_*PwF;nkY&F;JI{>a-0s;hZs zA1VOD{fyagJ|^L|xS+w|aG1i3{6Yc<=TZ;}I3|z)QuDp%A2-am{#M#nAhga1TPx_O z@hohWhV!(Sj7ioPH2u~=^VxYI1 zGUlb~O#-qP_P{E?+=!N%t1=pQb-9|i;N6?8%#-ps8)Rz&hu0tz@hsRcIe~Z2@@e@j zg8U%7K>$92@tBdG3my*#iKx zTfYx(JX(QM9aNT^-J0?duROQg*cNT07XdS7)og`m04yDe1C zKO8z#_Ju>N6I!nYzRTru-<|m32io`mR~?fO^B1WPP2w{z|8^1J<)rC@+YF4lNFJ<} z{q?H_Fvkqw-gdjt&$hEx!=8QTGers(xYPiUc8&gE z67E4ane|B*)*byIiJ%{L)&XGfl~>qfTYyz89aD_Ky@`pIOe)es;Y%QL*)L~NPi$>DPoTLa3zMiw?^dscF%Y9KYbRzDm7Do(AY0@6qHOzyKfOP%ls} z1N`?io^Io@%wo80wm#`(M&^$?;c+`xEem7I(+d%MA@c`(6X1(-0DQ-#|A=x&gBP)` z`=U28kD)DfwqH+3grq!PjPy!+k3fnv*TES9$;JiS0Fz|uvX#^Zi!G5*qkiIIT9oBOj#4MSJYAyhS>Pk)7-plEQNT*aL7X9`{ z5}%NHE3V3Tl9K$!tbkp(3O&=;G266LZ(<$T$`lrr_m*2)=z@{7CXNrao^FOc3Vf9k zoaHikoB-T) zOI^k8#ADSTz&4pKN%a^O;JqI_sU2LVAnZq1QzG?#uk_)G`*NnkKY0lt{O*MKpN=Ee zKR@kAdF-B{;4EHWY8XQ53 zi9xavxOoc4ysku1_QD$($O+txnKNm>&(c7 zFp)COX7Z%cRc@Y+{dupEC~Ri7ZP5vpm(`P0LPx?bNcpg-2#z!zO^<(S2D}n-2N~py z<14%pE`o;isr9GV;F!Yph774M-F!&^mA?9hG30|;`G~F4KQ#XkXC2E@M)=>bX%hlmQ*NZz89^S{3aJQT94=3Rs=Lf3` z33x%R{3#<_rt__J3}s#?Y%eXY$(ekJ;p+xcIj6hB2viByK%zr0;Ng)NX1plKsFN%W z`!;y^-2w!Ms8V=IMpu1TI#q`=P}Tx;%6K1kq|}wjpz4OGSv!LFBCkL7ra#?-)l+PU 
zbTeK-dfaTrD-P#fycBsPF-^XMTBhQ<`N}!x421D>nf`>krHEdUWl}Lg?1c%|<1Z9a}W2S)=4wD)6f*5w)`e|yf!w3m9 zICev}{4I+|M#JMWue!um&c}gc3ImsJBcbDprwU?@&EN_|Di}!Z-0^TbI<`}W0m#7C zjt2(+I*ink5$=-M6`b9=!y2cWp)bEIjkXgV3Rl@Nu zA^evE2XztJ*0)IaE(}G-NUskaE<$(LcY>`iQE~dDreRKDhEcDAfF(We4lo$sB9@v3 zsh=>|XX@Ugv_p`k>}mE0gn->vs9FDd^O$6{2#VE@@E5faGCSS))%^f{au2kUGwI7i zno~8fXQf@~j){EG(xNrJ@H|9J<5j2;c5U;i$x&JG+3U9o;O*1S2;k2n+V66u}jLnKUYQK2uMBlwG@oFw>=s3smWBd{ze(*PdDM>j( zWLsFdTJ5q}thcKmt(N-hq@B?V_nP@Ua&(m}>;vghfR`d!xh98_qPkNKmaJ`98H&Ci zmY3p$z%}L_NHuG`Ki^^PEDFQ`@Aoly$c0RUc_VPtTnLv$cVHdP#wj6Lz#oea+Dy>0 z4;RVPW{CZr9+-Pp*(Z!^`5l61>lJ1=hBi!QPl_;C<*zNaH*q+r{$jX>Axd!^Na@aZeC8?XfN^N}Oz3L*FAB95-3FRvT5 zzwpm*E9_pT1rIF0J$uL!`S3Q4M&LE7ftLAuDj|{3mj2F`2SbjdlTNFL<1nUobTPu) zIvnt4tJil@Uuoi8J>Li*mAXx|SLcOfhx5$wI)59nP#>7xL`B6t4@4XPi`W`sL%*V& zc;y)oriWrwH$}t|zH=%ptWv)oWp}-OJjM{u@)^oe$=oj7ZNqEj)TRFDRsK`D;_j2?udCGwIjPUe_~3R|76DnO zub^Q+@}g+^kADZ1d>y3HBO^V2zT(9gp0{sM8u_Tb_^+ujw@NluY(Rs0G9kpW$5TE| zS|r@l);8>fo_^ASene5}mt|4fC76xFfoFnpv4SA7PlbzC1Gm`=X$$8G*nvP=)pYfGi#s!w#NCGtM`=F-IP%cvT(eYVPU`nc&TQyT^xFc? z$T}k|dpk!4Y-35Cm6FoyQhYEfdS5MNfAPD*PopmK{7JI^!OD|pD^%NuvLcC!u;$_< zS4ISome_QHa>#ucX0sUagUhd9eL~qv!6LLPQDr0xL8l`qRCok-$@5|74HZVIP$w^t zOFQX%o0f;mGcXPz0^!!e8!E-Szi~-WJP?bXsN+=^L85+S1;cAa6qe_|)C#7)N!xz- zl|wBwb}_AS)I>?$6b{Y@G9WxLQFZZJ>dLfwCBs0b)}=_}M+C+zCnYw&_+VitS*LK> z$&S7N5M`r}kH0?p+kr;jcCnMSH?*Vz+>Rrl@J_t$%AI4V(;^Pcmwl7H{rC(%gzzs> z)x{G?;TtRwv$|Qj*Zl%?yF>)=l=XR2E%nT)s3a>QKB7c}q!fzDV{k_U9firQn+I?< z?xg^TZaDc5_Z{H_buPS6S+_Hzl9;ZP_WuOUh)S{H;9Eb zP+Vjwu8IcQHD?eu~eO-W3RhiAYl7zh@~X95P07A zzHWl()R5}0gLeck)PTdVKWmgIjmNamn?ODpG(PGL!EX~68||kjb~Nwsq*O}mHYj7p zV#B{_&)%DcNRM<_J@Bo`pfSAVpE3J07FPx zcry+6N2LFn6Ch&REds{rf0goY;>QbfUCl{Rk<rZT$IX#|FQpiIG zdACmdkPFTxG4}nhj(;bSW)?8p8c4kS^)7t|z*j|fds9<+Qbc&{*56}eF+QV@YkrXx zB%yaaUhy0IQZ4t_*78hnAkh+-jbznT)GRSxhxZ)J8l+bPdrOxik?H;Q$+H;aIVgW- zQ(6vl2QFvpQ%&*%&ebJOGk`d19o%iU>|UM(C&shLSvOX*r>^Zi_fBkr_J4*D0{Rt? 
zC_q-!D*@?2oyzxT9-vZXxGLp#xKL-`L*Ot|ATMd8r#0>IRm)=FN0HrzNhQ_6E;6O* z?3{{NwS6OfmQ!|nC+o$=zvMy_=nsYJ#%>kKpJj$89P-cJb>cCJ+<@8c9QDzZ)5&1P-{&pEuG2g@#wldlg8~b2MLS6vh1)oHRwpP z0tD51&(bX#r845YXmKvbrGlT_Y}w-Zs8f!Eq8K$g;R&I;GTJ^I>gNh|YAr5_!UkpX z_1iryO{!W4Ny%n&nVp2mDn>d6od*+IZ!QPLE0M7}i$&87!#Y(i{)eYeh4bIW( zxfU7>rAyr0dLL1R)dkH znraEv10`Et@x)8Nv}(OctMOFDl38->eceAp7k+)~YV9Bsf9kWVp?>&|3p(qYROgBumhrKG}VlY)M7bLAL!_2ihW+Yi;+EHJ*# z*8Oue_9pUCA5w$A!r{FAoTTdwi!3=a|ABYyhl!k})4b&@=4pd$Z#~{2hFt&e7}%O5 zqLjy=O_0cjeMkF)0%7-NfC?5;otfni>&vi0 zA2BWLc5}0{zxb%x6@+mKNbvx9)1lEe<8u+ckr3heTAMnUBL#&!?q3wW`skoOC-dz_WrueMz z3{`^yT8oq`?=^=3h^z)yu;Rt#u+6{NpRF9y`vK%+Jaa`gFZXBdnu*kyFXL8cC_zWye%l|;Y&w)vKq*Zr3Fd8z57gBocWaoaI8V$5Le-@ zP8;tKRY zHNR%?>=VDD*11|K^{b&#_>N}W<*fIITQ%f^#*KPMvwK6O(E0y$Kj|G!Rjm@L2}7iI z6Qgnbf9)E~#8Bmq9ZG!UR+-V-!YNppQ;kkGs!h(-)rO;4O2cvolu%HvqSqz8zb^?~ zB2V#_Zvn~KTZ(R`ALg~2(464h_D3?k2b0-`gR8ZWozn0oxb( z0@45Yza%ct1l5n7!w<%qz|)paC;4`}6Ti}lhJBPP^*@)V70$`?K{<}07_BzQw&vy= zKU;y}7<6BMe{m5t9q-di@J~Q6tJ>&Tj)+Z9Y^2JV^KS6VN^0oN?Q86u0y+p_+-jLU z2Asx@i&!$uAf@%>^_C7NY+EPchJ)&m(0~MG9JR9c!F;V$QuolOo%c*z?@72ca?Ivy z#3ZO~@1j`6pY6mL<-vzlh}}+XOPeB6!UX=Bh7&H1wOD_qyUzNk6`E;AV2i(4 z1#&2-2Oa=aEWFwY1@fZCQ2p_AQmNNwuLw>&?$?Rlhxf;%G3unD2dp?fOR52KE}>$b zd;UG~RU=dKp4{}?sf*i_dIX0nAkAPWIl_EDNfKD+9xUk8#@xuosun2%4ne6jWNwlI1Tb1QR?``J z7a;p`uv=JK1E`H~wP_slmK@g+?*Qqteh1PQhy6yR=v+>`y77JwV-COreR+7H zU`{&YPG{0XtBuCr8o6KUM8A`&c}A5rXG5Ixfa+2Unfp8<)p_^FRGp1MdfE@S#|-8t zVh}p#TUR6jRCgdpQ~`*|hiO~xt>+E1L-z-mhO2<&&7CaH%k@SanRsCP+VF1$W=0># zDj4^*Gv?$CB!GPY0n@xxEjG8a?N}5OV1pCT_Q2qzx}N@ebm!9U40y{Yk(WRPc_c%2 zKdKjEP)AeA$mupCwGZnpRRhS}Q&}~7wg^n`%(IImf@!P61oO?7-C`~*IO+3ljSR|F zmY$6Udh%aJqfo4(e6-Z`5IiE%IRqr0OnW0p>;d$ItzH1gi=Oxwioez491GXusch$NNJ)63c?? 
ztR5R;)%|sd3))rir=55;`^2V;Db?l>^Tqm`JG{*ZEFEG$9TXV^CA*!muI+0g%T~xt z%9f_xg2JM#to9*xa0>2SHqkS|eO6u5$bYcc<^T&jA;g8^vyWkQ-PZt;o%tc&C)fFB zjGuw5{s2z*SX#d84#Cj@&@k`@!PEee)KUi$=fLHR4)RkX3g7N(O4)3s!5x3?vw#}e zmSLOuImZHMc-@>?pH|A9S3F_wq0EHP=^=XDP;{gz)$SN`4C>u?=6YIZ>D*6(cLlGP zOe+UjC)1sxcW(8KHA36ZGx^c-)3W_O+8EO$56@rBjjF^}*X;D?&JJ-qREjpc66-ai zKLpVMsAAZnixKw+%9{`I7_>Hf6e zT_g7#7iVv^LqXL{4>TA|Ti@%l1EgD2vf~gJc{swkQW!zCxS^x4z}{2@GHpERw(lkJ zzlKESx^e6k{0Sryi4KG(p7Vas*toD?rxm6@ChQ{~u&W`U-(lFfn1?8XPhPa$L9DF2T zGA{wK%{*{wH+lh!bbbSOgAeFic^&#P2m-=29#7+WQgu)r+6?y;HQX?G=3?H<-LsP& zVy~<{;*e4lAom^nbqV!KsXoiI8i0&QA$Ud480hq;X*#uWl+IPo6F@4>u;Zjx-<&uX zU*ev_f&jJfel2!OSfyrO%_X#amP+5r^D?@4sG5cYu@)KgQxUB}w0-jI65fFcjkxMu z(I!(NzqC6zF7;r=bGiDkxPBh?Y`IX}2C3rfyo{P*WK-n7U#MZ`feNYd(_2$E3ME=X zW83#$;d3PCsrWV=a?jX8s&?zAgcE{<&s%g)0VIi6n{BSLi5_~Th=(U0U8}=UjkpYb zIG2D0!4va(I1`8*H6K8xfVK0a;8@@ihCS0bm3#)vD&UgGa?h-zU=2A5-Y^UI}omMM|h!Xg35wsYi(jrTB=+R!aau z<`8tvvbzf%J%~U%hXr#>lOLfIH=HIT`!x$RF7I*USdPDRmf_N^qf}m)Wd}U%&sKHk zoi}%F$nGoP2t8wu=D8~%vb_%LBYHfB<*j&lR_ZS{;!r)mpfwQ-ai))hQb4B@@v6=D z-7C(u0J26 zo2eheUBVf2(VIz|+{QRH4Oax~Q%HgHc$Mna)()kPHbZ+g^3$kOr%(* zM~{Pq52vUN7GtT8Oc$o(X0Us66@`nBCxe&UG5-6zfr{76?PNA){=-T>Pk_E^Rahn( z7YkB^1h&2mcZ(M#*ZbHB$hO~$G5WtaQl8jmE+=A$de5Cy%CpQA)sK(1^&PeQ4DbWf zii*>hBwJ{{l);6)NtD=87i&kN zF;IUBr-ONgaph9aIyC4FICggwDzgkwtlZQVIpWQs%n1oNN~z?YOY7(E#ecq|Ad7v+ zI(aczk8o3FT$~gu91yk_WcD2e6aebD<)UAp{QZOX(LO^%$F0Q5oA}8#*}po&^$SKc{+T zQTZC_*0)Y|Rr|i1Ho60{pi&KG)XcaaZi!=pE{tj|mS5{>^p5Adj#SR?A`fr% z=bLRWD2uP*Hr9U4H65kZ-=c&YsYl#f#2@hU-$(QchzpL zD8oQ{$96zjJ{)f=^;>}+NGaljyfa(ita7`~SiMXXvqcowyrnfcAgaYfcy@;30Zf7I zHK@1_IBq@5Eo?4GT7dU|3YX9PB%#kVo&~CP=$c`eOO$a%J;!Op*!2!ahr9;j?_wAb zSj*eV3o&8V`G0|k58nmwb}F1(B(DENu23Pp>NLc*rSPt+&?*lvqpKf`8M?7t7K}Im zj$xxk?!Ky99TmE~-E|eaX7ueC%FetTwIOxZH;kBcbq6)`_rZ*C4`Do5(KX*qG;(eG z25cWbyp3-2v_DpDZAzYGxN+C5Z=S89lUo$0GlyaT=U6`}Iu(qWHdOEGchs9|(MfqM zISndIeRzz;p__%oBhqfTnjbfJep103b4nm+-a)PD_2qkXIQ6cvf@M?Mr%bGH4B}?I 
zf#e*4tEQ)-k}gLK5~S!Q&W!{CCG0x>UY;;!^SsJeT8-!26lE7(H355~CG+b37s=Cw}Du z_ykq1I5tx49UW)#xqYafu4s-xG%^+xX1cfL-U)?q!wh46Tr>(PWv?L~HywjEk4&XAf>;EMdj{dgxY{T5tO4n?h=E7TuD7zSoHqqb}h3i}oE z<~lLFyOJ&%2RsCCp{b1*O_1Kt)AxppkoDf2yzOgueLV4@_t5);J*&jlD-nXG=n-0C%2^(5@j#&9r2<;k$WEYAY z>6w_CILG3($)nj_fhS;Jb-c*C=&+PvTqw$;Y@qp*eMe!O;vH#hv1k35f}|-x@i_k*6sOWv<|Nt1d| zt)o@ZDOJ8uP^57^{jPd`JW-%Y8k%FPP=TGG$N+yHL!OeLuom{j3>Jj#C7xG_Oz6E| z&;0~;_%S%IZsiLh>!|x21L>cPZv9Lub9(n(s9EsywY;yU{4cl^Q7DJIQOX}1_Ft); zm6#S~S%n^b0@E^lRWFtqf#iyIt4u^AYik|);8;DssO(MZV}!NY@x1+o4aWrL^gGY7 z*DG@wWXyj(jQK2)n#}?Z1-(62vfQ!}(>{w2b9KM|OKm$G40&1dfgrvy;&YO6tgMI0g9K(ZJSXIc9*XVnDi7vrU@ev{>cKszz;M%fC5Ku=OOHuK0k^9 zflnOnG=bv+6UEgL6Ic0o*gJ4c(Fcwneo2(Al+nG6Wzw!74@yDxHtQNs-^MV!HuyH} zJfvlK@O8aERPh3!5l}Usys#i3=up> zYsL{oixJr7_s=h?o)HbXh-FngM5=y_^$)8B$FmlYfC86@=QgL_zgdBI?*M|Tq=U{W z@$cXCQ;+>*0w4b4j}IkWC2xc;Ht_rLf4v2T(-ZlRZUHBIy5YM0sXIyz|NhNW5xMz) z9UB3Q{t1u+i#tCXBmVPS!Ef$m9e3dCO#)@Dxj;uM{_a$*5SMlGlmq+H@!)J-NwNBS z?3i^oH`UqP#!tq7^k9G(E6yYJZZ`T_^#Y*b#2QWjUF_7VkqtIK^Si6!J-9Y-Tkqwx z=MPS(O#fQhlhoSBHfp=dCM2nOLxTFcvk9eaGbJv?=dH*s#Z@~EiIWo6Mb0``&jNO4 z%G%`#HF2kF8q(kVD{f)<6apxsffUX@Qul4PR6eIdr=97qa3BsP@*($NXC#^J;o3kY zFcS^Ky|B@j?_O7G2^Ce{IKZr4H5RtDs2dQ96G?hik=qPFuQln$=8|w~v(lHCt1afk z8xu8BO;lDl_5OXC@)Z#8Jh0ezoyD}QQ1ABwwo&yw`Siy~h7tL-r9hog^7+Ge+M%9h8otIrgl_ilZmHIYPnY`?1d_2ih_-rY?#C(*U7 zF+m-*mW+kW+TB$;XFwgV)@R4idH5~1<&kwp3e#7*KhNJMS2i=#-lNI=qz2*kU3kI+ zFvjEG{@f`v%0v5bYjo7gk}lATt#tNB+Dmi2vi#&N=XGnUH2W@68eNX7wSm{EI}wlU zmkMSdLD%G2t1a;kGpe6R8q4LlVGv#LtW{5Kt>t@u7qIhwlLuQX4xag*L37-hCf#_w zT!>&vclW5E`@p+a#3mW;*)K=M_(aPvW9l<=+TH?!$@}`f z4!ve2eg2=pT6%O)NGP<;?du1wLepbo$-{k87OnEZ!PMH#nFq8QZ&#M~SC7xPlS-Vv zdN%XenW_a&!V>8O1-6D{pShgvml1uDW4ak&FGgOFJQq>m|R=5q<@KOCes;OwR7zf zSC%xy)z#apygw@YW4Z+MmKX}|ST-IJ>~M5Y=wf?sWSmaGTBbItyN=f+uSMIYX!c%I zx@J;(ty95~a}m42dVRfxT3v@gglFk&uQs)T3pdM-sg?>;Jx{J}h|ZHNSjn2Y`Ffq_ zjAN1JgM(f9(+Z-M)$GcyvFQg>Ejs<%PZ#gT2*{D6T3<#7@iu7C)JMk&3^dL z+SU{lwiE`O|Kd|B3zxzoTtdUFR{g0Yv?AW*&XP{n_OpD^Tikpl-jmzF=uOeW 
z5Kzp&j4z4&H+B0mTkUtebwu%BXbiEiYU>?P;qVODTLV(;<8DJF5s9V1aEGCsMNZ$YxR@s7{4WvOed6 zuCZLwfBM-~UzjPh=ihS`xfYm~reb;*c~srD=kDRB|4Yd66zMfx6zdCy6T~>tf6t;~ z5|6bJm@2k$UG>p#ZcU~EXFnvKN_y`RAwmMm3SHBsw7CU_r2tl$oVz~`10Gl+|W z>eh2>`Aph%tcJ#XzqAGay<(X>C>)#$nmqGMP$Cl`fPuT5#vNWZ0wNS-T7>3vTKwtX z!leTu7`$~SrK0~H103>|A2x(EH@D_nW|Tv|tOygbI^WAbE-ev*Pc9cXQ<`ms?hq6l zVGE`3&xI>FVls-+!N(?Gm{CPG;B9h%G6 z$A7L!-?hS(_(0iR4epMLu)9?#Q%yMl1A|nRKRj*n(33@}_P|kb`(vRod<$h^|K8%; zo|-geAU4>BXXkW;;?KaH0q{_vCO{CuGlaM&bFM;=x>XHmB`vV1u-=}m zn3JgmIW^M&c9X+S8g1uREVh=%w{#L@CQn28aMj>m+LNXWUJ!D`>m zxRz}-JSKzBdD{#Xk8$sP%h}pPy+PplvGmuVZ0ETKM8L^=l=>B+OAvN)!KKV>?Yi4^ zQVcqOLaEoiK}X%@1IqI}`T(MPmjU>?<)Dc*!HNZN-{Sr7R=6Q+xht|P(WC`5)~!i$ zfBAt|=4Q_4BW-$NuQ10o3jwWY2V441u{V5(zosJK{4yC#GjF**D%n8$7&!I2Ni>t5q z&B<-wG5?GvbH_af;;L=X_#u>KxD=`cNTF&6A8CC?*VQsV{`ph-;g`I2P5f{1^aKfa zU$EW2SJrafu$z{UkT4B5AnBZ@-O3h5{y^6ej+%G_=#oO0N&{@}vV1HeR_kew z7E7>=#*-+5vAMnz0*)Orkzo+_BK$_2;2AJPv{6XaCQpx7TdKW}Oak(po7qHuk5jvc z9A;7T!XPV)>|HDJ(ijM^TD46Gd$R*rpJutk^`Vn4i5m*xB2oK8W3e#9ibUNvJ(0a^ zK+7)L%oz4F9VCdHJ^m!Y%b;4kCydMKJid54JEW>Ui)j8q^A~WhY~}Ob*2x@u_7p_j zcV+c%nb39!$2eKs)%`Vu%7s5&A53q;Y%3CfPx(`??){gR&&y6nU)9tM^_{vGdOnoB zt-#2>@eOE3jb_y!eiAG^isg+K6(iwE9Ql+=IAswyxqe<%I35KxJSxjlA8{M2yTxwO zU(T%4FbUFGw)q>TB6M|c>;uA6cg7U(xWEoT_q?IEDYawKjeq6cNl^kvPAg&G!1a-S zj%VGgwpuD%x-oy$#|#YYHmdCk%Y*B8vBQ)o1$aFo>-9%`RX$m^ zU{RhI!XJ{!?%6DTK7IC-Jb(=II7m^-pI|-NY#T^%*(Ygpm5}l7x2*()zllY0JBTIa zJQklJ7@d4Sc^%bhX2^qotYm`e8>aeXILd50G=tqu+JrQ zc%_6JRf$6(3F^+>v99T&&8>cxeeSv=ug{1F+1^;iq0FDbIre$`Ut(kqaYVG?CuUA# zN|I+6?D`=Dp0~~+aOUc2jk~45Ytapr@{xUNcckOFuzHmOyX~AnOXM}q75X_Hna7?F z+)3Gt5n#%kdhSVa|NHw7hs(nSFib01w2wHj$o+h!?1OhlRCEz3ZpoTC+lQL2?_j;@ zr0(`6vx8HGn{R!7xVuj1mR8PFir?K~wm_W4#AE{<8%hF5INg2_?S$Su;CNj(0_SAs z`Xss%PCOE&+A7-z!iEKo=_-Xh&s}W^W9|Ib!_Haxz6UqMGoa>^g8SAw<7M|~64DE- zgN1E8S}s>vnz|{3jpspjh8HfXK4)tHkN>$%%at=(XR92xD2FpppwVYLLe92Qw3~`B43rV zT3k5=+&-b0CXm<+PR`R7$P9i`L*Tm%bcy?CXKeZ_k&2k@bd;$yq4mdcwKvxn?$Pha zU(FZfWoRxpU=0nq7y|v*zVVBB_-Y%j5V>04!-y?A)3qBQlv`=o6BPT%IzlwAZRxrT 
z@++=&-+(AIjar*;XjzD;Fu4T2{SOa4EZS{9V@hr@w%BGY zXKgJJ-BUKXarEh%rBIeP3KlolSC)_*I~%KlAilGiEKiSl1+e7ymcz2?>6M2Lf;mmM z>Ukpwt%A~pAz%wE_z%E&&ubzF@`F4X)>sBN_nBpjLTZ|V*cz6v8qIH>DG7JWww zXo;ibUmN68Zp_-9+L#*77T2+X<%fCm@}L|mgj1rj4q-H8?TjYwp$Nv4c+kxqVf9jh z&v1H|FZVjhAw0riA+53mo!R(eDGH~@@4=Mc?p_wG2ck;8c)E-7?a*Nu+{SuI7U;T^ zONyrC0PJCeq*!68XP=SDw&(T0jgoNJ?>?d++Yx!L1B{-!-&vIihkAiudG`*XgPBxU zhU*fFw>4Z7dE@5@^t-3!lfY*?kVmD7Y2&Jz%p=D`>x;73g@Gmf@UA#6-6eQbTA5JYO@c9t5jnW`6nEZH2i4Tx^$z!!{P}hX(w23dy3F? z;P;OFp8f{~!K1j7%%x~Ca*CfxFWypE#M)C~Bcf(S7EC`&#k2^fzspXmkg<)t>aphU zy*El>#48C4(;xH*06Klf4c-{ReLgze4P;&*@z6W{rtc-X#O5J?B)HIsDlD-4*K%VM z=~OUtzBQBjDg&NLC8X&ybXmIB*xsxiHeUqKZv!!JH>U_32g+P;$S?7ncjvxgh&{=~ zPv?q`Ja;g+^d!^b5D6!<<>QBNGCvkRc9bA6UN=~=&|`_5;UrU;j1V;7DwGUq0#at? zH$}6h^G*GsWe2@=oGuncI+aN$#Y~QplSq^-Go1xlV-bimDB@zcia7X-;x^4v+v*RbzORS*1c ze`KG%^nWo*m@5lI*`Tjnm3Tf$nl2W`n-+}9m+SZn>-KgwRr27I>Tr2SO!h2d0nzqY z$+p#`*!FsM{K%Fs!=+I{PcSk1tJ`1$oFA#;lD7O33xv>E&^~YwWQ~m+!XDnW)!pk9 z1*u7ftO0c9uJ7u+g6YsmpXHC3gS^b9E3Z)viKIhx5vH++z2YDBT}4C*y}D{Ic+jKe zc_GQI3<}ScML`%=w%lwimW9WO z$kfN2Wr;WCFLyFYuO&pxWE`K8uiV#?kjFA7$k3E9uXt%3o8X#0h0iHqHlRjrx&EUB--+m5*6c2JRw;kv7U4_Pzyo-qr8jYZ-3iX> zQz0}Az@(gb-*qFTrWJ1al9g62c-YTwIo9XpwXXb??0{zGTmpAdSe#;vpkB<4y(WA$=ZgVffbWnf~V)Rpn+L zPBxE4c-71fPD0y-oR-jbi845JPrQ}PwLG^rK6ti$bB7vyE+uHP@ja77HQ*X?+rtdw zrzlS&Ky|viDA8O!*z>yVaPZ17PkCmn+A@y6xt?JyV(4+2Vo`%EKi_A4K zNxxvcwX|knvvhF3WV0` zfjPLgKb~0blp)LhHfi*NU2cCJdj4D(5$={96+M2(gm9uofe?GOO~N*uI8<{8lpw0Vl;A_YS*+ZAKUUarLpx?eft^@ z##D+e)hhN7T^cGyI%=Ba9$Z;3y*?ls*1P%s)*OdHrG_#W6q7bM=ZKT;>ppkKQ&DxpyKfR(k?sU4GYvGKt`Y{Wx@ zNY+Gh59R9H5#@e32{uaT?S$5OI+r?Z^XM`vH46QUWOqd%YMq!JuC$eG&1z_Azog8W z6;RYr@#In3D8>$?OhUzEG2mKPvTf`ZP%0CxSV)n@DOAljYf2-X*{wCx%Q> zr4cS)>iaJ~YrE|R;xY;KQCEyh6*0Z8GX*Y1O*85ZD5%*qsk2kuPRDd$%9&4e)`VIvCH9ca@cW_7!KsELXB8?)4L#XY)r}LZs+-$0PQk zBbCQ%Yb^>=L_h2kxo&6D{1h7>Z9|sP2jp46LE3_y(=w9E_EG$X^d#Z#c7K zT#iiNSnJov`NqC{eQXo(tw}8z-%KaHt2|(93%ZF%<+jo%ebIH)_v28HiSgCB#`*eX* zE3`SlDf?g~BuUN$LN6w-10xl!hezn*e|9Co3Ew>c5j92lnjdtLZh+Y2aFbPPIwZ%! 
z(jeI*5aqTR8s5iGjfoHJ=bul(2JmlwS2VoE8S z>ZFN2Cma*&9=VSde&N8G>JU5c<&&6swjZl<<}&SQe#K{>JdSu{Pq+@g_M)w_QVx7) z5O#HXU`|+A7%$LSGUghhRlJ;2#?TZzovYRF#ZxuL?3z96UY^9>`GFErZ8P`U?%{T>JcMaU5q!-zb~M!uY@(o z`M7+vnq4LJ6*d#ql>J5>4NiW+bhL%{C%zXXso#NU*^t-iR>`>j)aM>9jngj37t^;py>2itr_if;6I z-r!M%S>ps<%K-;EV)1OE4a#dLkI)-FQ-%f@_DP4gM13vh-Kp2zO^&O75oOP*pNid4 zqTe7*jY-O5^H@t`7mvl=6bPNYQKo0)+`2mA$~n$)FJ&qcsd3W%gu}&f80s-5ApKq} zlLtcHPTl!#Su_8^-zf30B4iILt-4wg8J`rIjPQj;S&oz5v5{u%6BO}bV>|)3Ge*^~ z_1s_P(!UpW!C0B>riBc~Jd-(T~OLn_=CF#FkcVSFfm zApMv=onpWW!KNdZ9{MNs^fa5CJZqeuIa^-j52#%%lMSTMS<0%tSpSoYpd6+L^gidi zzWmt#(29S(Fp~uiR-!Lo$LfD4a=>fA<+!;o9D%s2hj%d3{r@}xP%t3n>72hNpseqC zG8eWWU?u^H<88B?ls}EI0W_JzU9bbSoqx5MQZW=XnTdLu;twz%yej~n^`(AYsPkvT z02KsURumxizYfoG(13e=dey({E#Pf=$^^Ke>L|0I|2cJdVwwI~k@KHd8v!d{oZNOu zsYvCMbMRBFU*1=&8fm#d0`YuOkce^=wd$s~V*~7GYTWx%`b^ai-*N9N0Fc9G3CZ#N z_m&<>k?TbOmmqKjsy!@wIS!Dumr~{@^W62L*B2@|R~}o+GnERAI~CIZ6DMdu1^0lu z9T;5zOO5gqUf{HSk((o}{=%Hg*(7IYhQ&ldzQj?agyU=0^nb#E{^?NSQXo>p8hB^8 zbFIVw25R(#1OLmFaIf!-n##n{7T!+s+l1 z*D&Azd<6WcY9=bvS+`{vzh2J>7gg;aE;;|CrNMJ~yv=Q!Ff%U^ufa{%>~;O7o`k!u zoy$hgz}6eb#h1y1#Z%{RSF6gZcOAKnAg)*P!J9|V{_IAXT=2UQO(jWG=0P}D37^bA zSQo)au&lh374?OQHJ9)ooVAAJf@DwUbzS3VaS4OLi+GQl&3lzM9!lmn1UeR}j`=BD zT;}#6O$#Cn7>*fF@R+7PdiLLS37f(^f#o)GEo3d9W~CoEnLPf-*!DMHmiY*aK40+y zB*28LJ>~~t(JyFdzTK=;NwdUcr2klYn7+ST_?jI?(Zy3XUX^2yU!(3$)BWC$8*sd0d^`H(&$Suki8^iQD;i z3Q_w>v%EMmG_Ok+sKARcn_uuW%}{z>^ivD z+N~w&7F2m`XQ=TXCtKW%>lkvun&3hbVdukH`UGCnZ(1F*BGha6j|qndttRH!8eKkV z)W2|oaEzW;0RfOgh~R;-wSH3Mp$g&ePa}98o(MNI3snd$!bneM@^hbOU)soXSnra0 z$f>VHenTW{A=0o=yP^0txnNP`yR9C`JeA68Ej7E2|>q z^<;KEbl^-DpLH9RwyMK%imYF+72HO-P-ee;C=Ckt`JEmt|9)?Wg zOynDpfH)U$9l$7otPU1Vj#!Ug?%4O{kOXxC@cVauF~u3Tq93j&XN-f z@<*qEiya>E>*fcX0j9ptZmo-V&6(p;VnpHc)npV2^rJk@0S-wt?>LTSK0bT)I}bjL z30aV`w4e;%g7M-P;aBwBs}9^l&gS|(FgCa~kP;JdCuQ1Mbbp|l6s1}_WaH25STlbl z1Dg#;G0hhKW&OXIUMGP%aeB7OuK6LRy}ZhXlpgQm=;uHB$1o3^hlXP;s=7Z& z@wSTr@pD~od3AAef*MuM^5WV+E^nv93>@55WsLUk+2i!)c<}aQ;pXMcmM$jd1F;U@ 
z53imT)8lO$?2Zt`s^3s`hT-7IU7K{ayw_WkF(JE(b}f1ne3Qz?_hparml$l zkX*a@it$rvY8&s#oCB}jN&UK;Df2~|j;hA%Ko0^$FPF#pMxH(( zFHnG7EaC$O%QsNHf*2%hg(ji9g^}=cg+1hEUgLbd((MZ8oiTuYPjeF3EnyM-KL{cL z18KSlC|(Fih!pKFQ@$>kU9CaIvRBdgDw{8t{3NF^Oi<(RR{nLm^ymm;XsYI^(!KT~ zWV^spS7!+vtC2HcWTZ-STT50F10j+Yiy*}i_#JCg32+k)iAtehO0G;m&vz>JS3N$O^FdcIZG2E3<4 zz{zwmJUdGaPY~Y&5u*M8kz%Zh7rtXvrdxr1=RD6u1YV5jPGN%hqsIsmiX4C3p|`L2 z7T(^5anF0;;OTkn(`SrpT9&9MGn7G}6jyW-Edpz1#Jv5=tkZ;!Qhvql#em5-KqT2# z=5k_%Lvz2mofExCi$t z!y_v7+wOCt0f&U`BrqYup7b%|O!o=Xs!HCB$>x7mN@prWvJiO%G;{Wj!CxcoHkJoX zUo&-n$Jn0++TIj8me~`5I*?#mGH%FFTrQ$++jOMaa_AHE74)cwCm_j#L+JE?jy2{8 zfOYMMz!-yrr(qP$`sU~N)8YR2YUjy8%SC0)L`b_^|n z!r2CO=U$om3KSe#e^3{QE?4s0pCXQk%JPy4%^gHbwgJAD_!wk7j&l_lH&3K{)Xfqvr%1X2aS+=0l)75BVI}kJrv>cb&rQfAly3ej~&1$FIi( z0fidTHSKYL;3u+WK?i{IiI?H+;dKjg4FIzM|VkP9hBW6v+6eOOQdD2Pi26Pi=wMxIz?(xa3=^ zpw=_!NruR}VY5gJRTfPjZBu#U3YFSXNS>-#Qb&VB47!>}b_ zVqEsQ2G;eiAm@@p4i;-JBW{poP_0{V@0e-= z`hBUKmO+)g;pPmsq`|@bNJPGIn28zIIciOF`FL~ZO+(AD?Wg>T%Ll_cJ{ zlI=TEdY#I!MAr1;;Mgpa^)>*Jh1yS^Cp{^3UJ_XKbO(X!&LFcY6ek}H6NmI`2VJ({ zzw~Q|7nh8G4}X&6A-U2sj`zDldMVJNeRSu72o5_5UK*7W+>QjujVQ606-$#x!_WBd zqUGc$yFAyy^!k_yLKrn)d%poqd`BVi*Y4MwH?ZUe+bi-4@QXhKt}0%W%&o6qylu@dwKl3!>k zdc(>o#mdK~ds7{-%)w={`FTr8LShIr&y8v}B0f{SIz+Jpk#I&oBs@w4*r2N->wFmUJ%e{lBIt&B%DS^k}RSs7p=g$mFCP6%#4rGYu!0R5Yyi9_3$Br zoo5;CJKjK^Q*vRkGZlt%5&3Hg;E0L{RdWqenb!3@`szTEH)IcR=6)xk&;VllcC3A! 
zo>%(PyD65X=AIbXP9SQ4yV|SEFX2Z126)HigLgSg6?Qj0>g5IY+v^Wj1@0;^tiJD6 zve6pr1bwU8m{DyJWuZwQ0kS5t-{tM$k3-=IDIO%qLhR=?{20tD9-NN*DX8&%Isfd{ zO5ZG%fYjyw0;LhgF43Yr38w%ngc-JZFs?cWx#drtK|@;XigKp=cx*EA)`~vhezaLb z-h1dC9`c4@kJ8uL$>>%o>%urQlbV-#e}k>S0cNjE-WJjkIh(QB4yy~tEYb0^mGOX zOZnMd8~7{Ynv!J{fTz))d&cLxz>n5ae_Yiny2dMUkk+CZC*OiB9IJ)M8q_wW82Z_ng3h5fi~T2%YWo5(U1lt3KL16_bF*ia0$PBIKEfWI9>6)er>1Xc4tn%FA^u z|28aO3-te4uESg*$U_(cmJkG4)%gQdDjz!{gvU_CUUML-Au+XfLf37EA6=3_8QC=& zW66BB&b~s&wUiYl8Jb+yX-1+;cl-@_P1xFT%QOy7Ki`cG%X=qGD}v*EkItXqOpzNV zS5F$<>%rAdpjs-h6TlsFcUCI#^>AswC$(I!OgD0oH)V}_O%lr2xZ9#pN|HaM{4ENp zy6PgF>wzxjL!H=g?)#Sub$`!@&yiePI5zoxA-Lxl)t@wvcvGKr2)M{9&`Cc>j*Jkk zmXM4cWNIn#AI`r0d_BSLkq9*D1ZH7EQoSzRM?3Bbk^g@YCqsIJ4I_kzu+y({o zcy+tn9DlVXs!&6PE4@jru=gXZM<>}OfYRv=cLuS56$G7cDY#16R=GN4NTJ)L+U@JX zjEi^K2V&t@^uoO!#yDj8(Z{hCm6kkp%^^TS5> z+9XZ~7i>(kv!r28 zd-&n+AK19p<{a&oo~+9jm(iwM3|@J&^k4XOZk+Gy^`m!EreCgXHRWye_)V!ux%SYG(t7RKw4&n4X9W@(tVGXCn??cJSEC<&ZCP6p4E!E$> zqMjz=EFhB~hML6T1uOR6!SYp4iJYhTd(#6v4v&UQM{2~ab3`E3z#JlUkagG%yIWl& zH6O@&oSwjHQnD6BJ-f19Yi;XML_?ggL&bXVS*o&){e6wUkYEaow1^`peqHtkqSicz zU87gQJDcvdHh1;LdGb~ATKU#OE-NQ$1@%C3a@&t-ZY`1du$3Rl&$shE-|KL59bU@y zLv3V^%M?nlh%viA3Q$y0sK#8Bu*^Zo`kRTsjMKk*#KChac08i^uRQq(!!ESBv*3f>^kFQ>sXzZa@1 zb#NoV%fL9opv7sMv$nTFSJ`GumO&{0?Sm;n8cMr&GneGYtBLZpz0J6(#i60Wdo5&a zCl<~QuaEUm`eklW2RiPme1_|7zfW&eKEPRpCf@|{fHu1*E>dqZ+IDA~wI-2&PJi@mZhcVgTo3oG#1 z6{a4(Fb!gG-XW2E8I( zCu8Im)5|`Q6%6>#7$>VS*`}_;&{7?ULRdR6TFxRFn7p4oct3w-;jC_7kxM^1YP@Ca zyPd^&i9KTxux96gr#e-p=Ag~_da9p_hxOviKbP_^kf4)X5t5Z4LiTKM7a8cLKdt>t z?D3zH(3qrh7*Ox@j#OFm$EKW&GP|kNKStdQ9TS-OgmT=smfm#YB-775U|~qA)53_0 ztwnXXEqJa3ah*%(^-9^eF`FX~Y zjMkJ4rLIQ&QY!B}^Lm~fz;`NG<@Y0l@QDs?!p;n?SFP6%e=_;>EgXuF>;%huhCk8? 
zRr{c?=)zM@Qg%P_+Fg8$E~vaA*qDd95AAhXdCd~C48B86CLcc@6yb7FH;Ihf!kGIIDeRSb{k2m zb+1A>HuI$Z0i1qK9XmLR`PD;aS?~8-xHqxp`n^zp_*pqgiE!Uo4rJ&nIZcb$BD2T+ z_mPNts)nVQ@ATlz`9P;y4^niT%5GYc{zCBYoz34{bVqhXmBG$Ulj>T&zrd|u-vxhp z@A=a#)mDj$=YRbCsUbKGPUj0P_dhS+TO?eIJ65=%)c^TBy!{+p2mzXgwJp_&zn$0r z|Di1?$AEq10f@BFZQa!PNQt#Tx~Mzyp9>5>woeVpm>3+77X~PrM#VVSR-n+k=ga@M z_x-fGu)9Z-?HiWXPN_74Nt&FllQI#_IcexktO;cm+mbRQP5kF z68q`t6zMoGk&l_M5p&uj4 zD=8`XZ^r}IMUf8N7pGB0ycd6vXDj#VfB{frp35reBf`ymGbODp4S zSR+p@s1@!H2cEE~BpmIf)&}N=yWEoNB!53K_&w0f4&fk5;=}(Mny+Y?f8(pF`ujUp zQTj6k`AN;HtlfxoTGX<}kE~yz{N59WSCLXuQV=OiMrw4>n?(?CEgrPi0VCVr{T+a~ z;bNI10_FwH;?{-%no3%oum60`?#n#Q8ZVc#{r#>|@7?xU(YL7D!9aR;~!vAv8<7r6r$w zAKN2fJJ+~)Dn;T~>pZX!wBC#U)Q=Z{3l2c`J)HvZNSor^XB9Si8z^VL%PcxuN8pb*hmft_?@i?xNWriEkW|?{wXMvLM9Kw1j+#PwEFH>E&F$sPD55ofdjC_HOz~g3Rw{j2aC*Sw&TH;)^_4 zk?_);v(;d))U^YSg9Y5J{sctz09*|K5T^iupC7O_0f>Sl4OH*eA1~I>{|BlDMXC%4 zcp7)hg9jFAf?A2=zHctKkxu#}S zD=oQ{s%v`F(U1BwQl#W*xV{p+YmQMN=zjQX1ITgL2S82i#2}$>F;Do)-1VS`t}R-U zl<*fiefB##jSgr5>z{8xJdaMvD_{ezbiD>CbR^9n&91zMHq{>vP=oRtEx|BL5FkA= z)5$qw+v1saaU3LkN%3Y0AeJ?IAUePwpuse`A#vwc_CM+KKR&Q!dY3a{bJ5l zy$K*`_7h-YU-o11PKz!Exok9cx*$OpMj6V8EfGmd0tekM%?~}UK^>bmVALa#EP$6r zDIZgo2>{j~o&+~Q*6B}{>+c_e{QxPvmO76CsMGW|=?0YbBI>&W#L7S{^9JV9({2O| zz2f?M-k&=&+{I1D)A|6g4af3%^SI?fdJ3t0TrWA?Z=9|Z?1i$ zdV9L@ZOVNK;YGOi08s)Td)iq;YkCt#HKMe7hh+Tft{$jqSA5YJX@;YDZ$E^9 zaPy_w0dTw8_mxvOM`q&aD>4Z(gsq7(z1f2w7hor~w7%FD)a6(o*xUo`C%BQ(WV;gQ z8rqTgKZZpy!aYN34)PR~3%uCc5U;Z(OWuYXL~r^%1*rhp-OuRB8H2Os1GTP`O*2CF zXMTa&_#lcZtbX&?oRaC^2|8eK19z<{;BhX#^`8v|=ehYC!lVKW#XsSa9@Rr=`dP zoa_jS%Qnkc53R5vFrH-a7j;8M!mZ#8z)lUO|MKZ3}&KMedX_R~I%110FW1n-oB z+3ilh8{jWP1D^IU#OkI`IgWTM30Q&zRKe?A&kY@Ep;${g#!%`CYG_})--RnkO=WTE z_WU)vDw-m>vZ!IRZ+E%=SZ3ni%+Fq$<+!_57jcb*XaT2MKuo0<4=8E2bX;6H`0k3Z zbRciYg)yhkG$K!E1a2^UBXmU)=5@<&xnD9Hqob96S4Y5MXLVk;$>8pt3&}{$&qVD@ zV;Bge5Z8CE&$j{3vIqX2h5o}L-^taJsQXL}KJj*%;eJ4*fL*^1@3&Ae=$f8^LV};B+|1`)>Vpm;h#Ed+728j6IkxjKEnnROBb9p=7vEa8c)+ z6R-Mi;xKCcS-)GbPz2m3w@Uo*~a=7XHM>5rL5#rhnU4r3@R_+GL^yY3znP4 
zG1=2_91*`An;Oz5xl&xemZGFc1%1SSf(FYN5`Dt57U}gf+x5P87tD_UUqc4|?Iao!QVIcQwT;5E5iY*skQ{YUzekD&p>6h1!L-jIR_^#qq z5Pp~ox)M8JLiC_j9N*$}r(dTD-N6zNn;lMkMdsJ_G4cEnm@ZBgX{#ivyVB1r@1Z34(A8*U{7cTb#9%4S)>3?sUg`R zeJnCQ3qj|1>g@NyJYq!I(dO+>GNP{h%+?7Uun!~}%ymt?THgO!jAsg=>@hD+>6*KP zfo{kt?PQ10G>vWI3{8v_!1eZXJR`J^n&4Gzjs)e$u$0I*nj(;yZqiSvizJ`$^c;y{ z4>)|g@zTRkfJ@X$;z+(Xfkr{}kOsL)-+538p;MhT;EJO%Aw&^IF~>U=s(rj|F(8o-fKhyWAgM;g;V^TDXGY z#Bn3x!eT+K9Xqga@xHc0Q@h-;^?ulfkENRvX!o?09PvOx2gTk<=G!B+14arV%!;`8 z2NcJr8Td3Gzv@mMoH?zrpT*xZP$wUbRq8bISQGmEe^&8QVO6E)NB0>@tJ4$j1n;>r z>FH&U-;)d;Eu9)ykWL)GTyh_BLQ`dc4x#JcxP@V?by z=E5Mn>_%X+ky(?(#nUkkHdQ~rDag9W$Q}OU;N*V!yA7l{AaPpWJaLuP5O->R`sAaKV$BNfuL=Kzo*r&KF7&Y{r_gTUv@^`fOFd4Vt$YV&TX=nDzTvSy z4lPCe>;EUHYYgecyzzb4zn0HCisX+pKNb5+uK;9atCV zi9F=+W)rlSf7I{6VsOvTp;j|CM|?KGP%J^k?-t zaJT)6J|o+0j?#SnH5;tu(l;3X-@W$p?Jl5$!me}AxF1x#7ucg0W@_24c+NoWPT4g9 zrl&_7yoB{n9okSOl3DODHPjQho$jfN)bUB-UFVlxi`;lc(s8a!?5%0jj=qOf3>GZA z7IP$D*s`}ubZaZLzuD*~lyFWkF@I`Lz$ugXRhKN^!G>!VeB@Z-u-omn*w&>;YJZkG z0aptyEQHQ!0mm{<2rKMrwS4*VdGx2_yFRDCin($^t~IKB-{&>a&O#0yn#ZIzy<7{t zsTedQw%{_$58nVWrsRj~+>Dm-^2pomnGhPbLUdE4&J^Ua3o$vyJzi^8tPyQ**uOYz z&bAFI$USagcV3CHb(VP(vKH`?t^n{-hCWnrpwjOsLr|kiafd+TJf#T^%7Qa23X_yW zukS({3c8}%xa{W4tC=@-x1ok_W1i=M3&- Date: Tue, 28 Jul 2015 17:10:21 -0700 Subject: [PATCH 052/100] Updated AWS SDK calls to match the 0.7.0 release of the AWS SDK --- builder/amazon/chroot/step_create_volume.go | 5 ++--- builder/amazon/chroot/step_register_ami.go | 4 ++-- builder/amazon/common/access_config.go | 4 ++-- builder/amazon/common/artifact.go | 2 +- builder/amazon/common/block_device.go | 8 +++---- builder/amazon/common/block_device_test.go | 21 +++++++++---------- builder/amazon/common/step_ami_region_copy.go | 9 ++++---- builder/amazon/common/step_create_tags.go | 2 +- .../common/step_modify_ami_attributes.go | 2 +- .../amazon/common/step_run_source_instance.go | 12 +++++------ builder/amazon/common/step_security_group.go | 4 ++-- builder/amazon/ebs/step_create_ami.go | 2 +- 
builder/amazon/instance/step_register_ami.go | 2 +- 13 files changed, 38 insertions(+), 39 deletions(-) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 40925483a..9db99163a 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -5,7 +5,6 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -52,12 +51,12 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } createVolume := &ec2.CreateVolumeInput{ AvailabilityZone: instance.Placement.AvailabilityZone, - Size: aws.Long(vs), + Size: aws.Int64(vs), SnapshotID: rootDevice.EBS.SnapshotID, VolumeType: rootDevice.EBS.VolumeType, IOPS: rootDevice.EBS.IOPS, } - log.Printf("Create args: %s", awsutil.StringValue(createVolume)) + log.Printf("Create args: %s", createVolume) createVolumeResp, err := ec2conn.CreateVolume(createVolume) if err != nil { diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 5314ef0a1..8ed4df9b9 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -34,7 +34,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } if s.RootVolumeSize > *newDevice.EBS.VolumeSize { - newDevice.EBS.VolumeSize = aws.Long(s.RootVolumeSize) + newDevice.EBS.VolumeSize = aws.Int64(s.RootVolumeSize) } } @@ -64,7 +64,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set the AMI ID in the state ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Config.Region] = *registerResp.ImageID + amis[*ec2conn.Config.Region] = *registerResp.ImageID state.Put("amis", amis) // Wait for the image to become 
ready diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index 4479e0181..88bda0423 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -40,9 +40,9 @@ func (c *AccessConfig) Config() (*aws.Config, error) { } return &aws.Config{ - Region: region, + Region: aws.String(region), Credentials: creds, - MaxRetries: 11, + MaxRetries: aws.Int(11), }, nil } diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 7b2537072..aba2ffde4 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -70,7 +70,7 @@ func (a *Artifact) Destroy() error { regionConfig := &aws.Config{ Credentials: a.Conn.Config.Credentials, - Region: region, + Region: aws.String(region), } regionConn := ec2.New(regionConfig) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index fb14a66ae..f009cd7bc 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -32,20 +32,20 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { for _, blockDevice := range b { ebsBlockDevice := &ec2.EBSBlockDevice{ VolumeType: aws.String(blockDevice.VolumeType), - VolumeSize: aws.Long(blockDevice.VolumeSize), - DeleteOnTermination: aws.Boolean(blockDevice.DeleteOnTermination), + VolumeSize: aws.Int64(blockDevice.VolumeSize), + DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), } // IOPS is only valid for SSD Volumes if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { - ebsBlockDevice.IOPS = aws.Long(blockDevice.IOPS) + ebsBlockDevice.IOPS = aws.Int64(blockDevice.IOPS) } // You cannot specify Encrypted if you specify a Snapshot ID if blockDevice.SnapshotId != "" { ebsBlockDevice.SnapshotID = aws.String(blockDevice.SnapshotId) } else if blockDevice.Encrypted { - ebsBlockDevice.Encrypted = 
aws.Boolean(blockDevice.Encrypted) + ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted) } mapping := &ec2.BlockDeviceMapping{ diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index c69ef2efb..d76cf4d07 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" ) @@ -29,8 +28,8 @@ func TestBlockDevice(t *testing.T) { EBS: &ec2.EBSBlockDevice{ SnapshotID: aws.String("snap-1234"), VolumeType: aws.String("standard"), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(true), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(true), }, }, }, @@ -45,8 +44,8 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ VolumeType: aws.String(""), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(false), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(false), }, }, }, @@ -64,9 +63,9 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ VolumeType: aws.String("io1"), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(true), - IOPS: aws.Long(1000), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(true), + IOPS: aws.Int64(1000), }, }, }, @@ -93,13 +92,13 @@ func TestBlockDevice(t *testing.T) { got := blockDevices.BuildAMIDevices() if !reflect.DeepEqual(expected, got) { t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", - awsutil.StringValue(expected), awsutil.StringValue(got)) + expected, got) } if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) { t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", - awsutil.StringValue(expected), - awsutil.StringValue(blockDevices.BuildLaunchDevices())) + expected, + blockDevices.BuildLaunchDevices()) } } } diff --git 
a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 3f545284f..d19ffe5bd 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -5,6 +5,7 @@ import ( "sync" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" @@ -21,7 +22,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) amis := state.Get("amis").(map[string]string) - ami := amis[ec2conn.Config.Region] + ami := amis[*ec2conn.Config.Region] if len(s.Regions) == 0 { return multistep.ActionContinue @@ -33,7 +34,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { var wg sync.WaitGroup errs := new(packer.MultiError) for _, region := range s.Regions { - if region == ec2conn.Config.Region { + if region == *ec2conn.Config.Region { ui.Message(fmt.Sprintf( "Avoiding copying AMI to duplicate region %s", region)) continue @@ -44,7 +45,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { go func(region string) { defer wg.Done() - id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, ec2conn.Config.Region) + id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region) lock.Lock() defer lock.Unlock() @@ -84,7 +85,7 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, if err != nil { return "", err } - awsConfig.Region = target + awsConfig.Region = aws.String(target) regionconn := ec2.New(awsConfig) resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 4750d7a08..220735bed 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -36,7 +36,7 @@ func (s *StepCreateTags) 
Run(state multistep.StateBag) multistep.StepAction { regionconn := ec2.New(&aws.Config{ Credentials: ec2conn.Config.Credentials, - Region: region, + Region: aws.String(region), }) // Retrieve image list for given AMI diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index ff0352a1f..df6424245 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -90,7 +90,7 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami)) regionconn := ec2.New(&aws.Config{ Credentials: ec2conn.Config.Credentials, - Region: region, + Region: aws.String(region), }) for name, input := range options { ui.Message(fmt.Sprintf("Modifying: %s", name)) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index b94a6031c..6482b8084 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -141,8 +141,8 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ImageID: &s.SourceAMI, InstanceType: &s.InstanceType, UserData: &userData, - MaxCount: aws.Long(1), - MinCount: aws.Long(1), + MaxCount: aws.Int64(1), + MinCount: aws.Int64(1), IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone}, @@ -151,11 +151,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi if s.SubnetId != "" && s.AssociatePublicIpAddress { runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ &ec2.InstanceNetworkInterfaceSpecification{ - DeviceIndex: aws.Long(0), + DeviceIndex: aws.Int64(0), AssociatePublicIPAddress: &s.AssociatePublicIpAddress, 
SubnetID: &s.SubnetId, Groups: securityGroupIds, - DeleteOnTermination: aws.Boolean(true), + DeleteOnTermination: aws.Bool(true), }, } } else { @@ -185,11 +185,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ &ec2.InstanceNetworkInterfaceSpecification{ - DeviceIndex: aws.Long(0), + DeviceIndex: aws.Int64(0), AssociatePublicIPAddress: &s.AssociatePublicIpAddress, SubnetID: &s.SubnetId, Groups: securityGroupIds, - DeleteOnTermination: aws.Boolean(true), + DeleteOnTermination: aws.Bool(true), }, }, Placement: &ec2.SpotPlacement{ diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go index b65ebb408..e43e866a3 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -59,8 +59,8 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { req := &ec2.AuthorizeSecurityGroupIngressInput{ GroupID: groupResp.GroupID, IPProtocol: aws.String("tcp"), - FromPort: aws.Long(int64(port)), - ToPort: aws.Long(int64(port)), + FromPort: aws.Int64(int64(port)), + ToPort: aws.Int64(int64(port)), CIDRIP: aws.String("0.0.0.0/0"), } diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go index dff7d88b0..a3980e3ee 100644 --- a/builder/amazon/ebs/step_create_ami.go +++ b/builder/amazon/ebs/step_create_ami.go @@ -38,7 +38,7 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { // Set the AMI ID in the state ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Config.Region] = *createResp.ImageID + amis[*ec2conn.Config.Region] = *createResp.ImageID state.Put("amis", amis) // Wait for the image to become ready diff --git a/builder/amazon/instance/step_register_ami.go 
b/builder/amazon/instance/step_register_ami.go index f97c5df0e..dc76331f8 100644 --- a/builder/amazon/instance/step_register_ami.go +++ b/builder/amazon/instance/step_register_ami.go @@ -44,7 +44,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set the AMI ID in the state ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Config.Region] = *registerResp.ImageID + amis[*ec2conn.Config.Region] = *registerResp.ImageID state.Put("amis", amis) // Wait for the image to become ready From 585638d06384dacffd9bd1163460430661764299 Mon Sep 17 00:00:00 2001 From: Bob Kuo Date: Tue, 28 Jul 2015 17:21:37 -0500 Subject: [PATCH 053/100] Do not require exclusive VNC access while buildling An additional client can be connected during build time for inspection. We can manually connect and set our VNC clients to ignore all input or we can connect with vnc2flv to record the build session for later verification. --- builder/qemu/step_type_boot_command.go | 2 +- builder/vmware/common/step_type_boot_command.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index e42903f55..b97241b0b 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -52,7 +52,7 @@ func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction } defer nc.Close() - c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: true}) + c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false}) if err != nil { err := fmt.Errorf("Error handshaking with VNC: %s", err) state.Put("error", err) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index b23ede1da..3959e5517 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -57,7 +57,7 @@ func (s 
*StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction } defer nc.Close() - c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: true}) + c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false}) if err != nil { err := fmt.Errorf("Error handshaking with VNC: %s", err) state.Put("error", err) From 9030c1fa3ad6d357ccccca288e7d7e81c42edbd7 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Thu, 30 Jul 2015 10:54:28 -0700 Subject: [PATCH 054/100] Fix funny characters. --- website/source/community/index.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index 3951e909f..31facf753 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -11,12 +11,12 @@ page_title: Community Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. -**IRC:** `#packer-tool` on Freenode. +**IRC:** `#packer-tool` on Freenode. -**Mailing List:** [Packer Google +**Mailing List:** [Packer Google Group](http://groups.google.com/group/packer-tool) -**Bug Tracker:** [Issue tracker on +**Bug Tracker:** [Issue tracker on GitHub](https://github.com/mitchellh/packer/issues). Please only use this for reporting bugs. Do not ask for general help here. Use IRC or the mailing list for that. 
From 66b7b9a0b7fd31bd56c4b24abf2266052f6f0fb1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 15:58:12 -0700 Subject: [PATCH 055/100] Added a section to explain more clearly how to reference external resources like boot ISOs --- .../source/docs/command-line/push.html.markdown | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 140c996d3..bb35a9d40 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -77,17 +77,11 @@ build artifacts larger than 5gb, and Atlas *can* store artifacts larger than images), you will need to put your boot ISO in an external web service and download it during the packer run. -The easiest way to host these in a secure fashion is to upload your ISO to -[Amazon -S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html) -or [Google Cloud -Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl) and -download it using a signed URL. You can inject the signed URL into your build by -using a build variable (environment variable) in Atlas. Example: +## Building Private `.iso` and `.dmg` Files + +If you want to build a private `.iso` file you can upload the `.iso` to a secure file hosting service like [Amazon S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), [Google Cloud Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or [Azure File Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and download it at build time using a signed URL. You should convert `.dmg` files to `.iso` and follow a similar procedure. 
+ +Once you have added [variables in your packer template](/docs/templates/user-variables.html) you can specify credentials or signed URLs using Atlas environment variables, or via the `-var` flag when you run `push`. ![Configure your signed URL in the Atlas build variables menu](/assets/images/packer-signed-urls.png) - -You will also need to [configure your packer -template](http://stormchaser.local:4567/docs/templates/user-variables.html) to -use the variable injected by Atlas (or via `push -var`). From 5218c5a65b674c947e104fcdd4dc5b43dd41830e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 16:09:40 -0700 Subject: [PATCH 056/100] Reformat --- .../source/docs/command-line/push.html.markdown | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index bb35a9d40..57ea58cf0 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -79,9 +79,20 @@ download it during the packer run. ## Building Private `.iso` and `.dmg` Files -If you want to build a private `.iso` file you can upload the `.iso` to a secure file hosting service like [Amazon S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), [Google Cloud Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or [Azure File Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and download it at build time using a signed URL. You should convert `.dmg` files to `.iso` and follow a similar procedure. 
+If you want to build a private `.iso` file you can upload the `.iso` to a secure +file hosting service like [Amazon +S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), +[Google Cloud +Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or +[Azure File +Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and +download it at build time using a signed URL. You should convert `.dmg` files to +`.iso` and follow a similar procedure. -Once you have added [variables in your packer template](/docs/templates/user-variables.html) you can specify credentials or signed URLs using Atlas environment variables, or via the `-var` flag when you run `push`. +Once you have added [variables in your packer +template](/docs/templates/user-variables.html) you can specify credentials or +signed URLs using Atlas environment variables, or via the `-var` flag when you +run `push`. ![Configure your signed URL in the Atlas build variables menu](/assets/images/packer-signed-urls.png) From 87bcfc3ef7b68ffa2d62c4d0b6e026c337107f7a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 16:15:55 -0700 Subject: [PATCH 057/100] Ignore internal packages for go 1.5; thanks @dlsniper --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 884d6bbf2..2fb386399 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,7 @@ updatedeps: go list ./... 
\ | xargs go list -f '{{join .Deps "\n"}}' \ | grep -v github.com/mitchellh/packer \ + | grep -v '/internal/' \ | sort -u \ | xargs go get -f -u -v From 32b714e0853072a61772dfff868f13d9eeec38e4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 19:19:48 -0700 Subject: [PATCH 058/100] Update code.google.com/gosshold/ssh to point to golang.org/x/crypto/ssh, since this has been moved into core now Fixes #2515 --- builder/digitalocean/step_create_ssh_key.go | 2 +- builder/googlecompute/step_create_ssh_key.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/builder/digitalocean/step_create_ssh_key.go b/builder/digitalocean/step_create_ssh_key.go index a99fd930d..ce65cb425 100644 --- a/builder/digitalocean/step_create_ssh_key.go +++ b/builder/digitalocean/step_create_ssh_key.go @@ -10,11 +10,11 @@ import ( "os" "runtime" - "code.google.com/p/gosshold/ssh" "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" ) type stepCreateSSHKey struct { diff --git a/builder/googlecompute/step_create_ssh_key.go b/builder/googlecompute/step_create_ssh_key.go index bbf048ee7..521e6c3d6 100644 --- a/builder/googlecompute/step_create_ssh_key.go +++ b/builder/googlecompute/step_create_ssh_key.go @@ -1,15 +1,16 @@ package googlecompute import ( - "code.google.com/p/gosshold/ssh" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/pem" "fmt" + "os" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "os" + "golang.org/x/crypto/ssh" ) // StepCreateSSHKey represents a Packer build step that generates SSH key pairs. 
From 015742b5470ed0c399d2e9fbeec5ed270dfc692a Mon Sep 17 00:00:00 2001 From: Gabriel Sobrinho Date: Fri, 31 Jul 2015 00:01:00 -0300 Subject: [PATCH 059/100] Fix last example syntax --- .../getting-started/remote-builds.html.markdown | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index f37a5a5ad..6ddb4ece3 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -90,7 +90,19 @@ it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple: -`javascript { "variables": ["..."], "builders": ["..."], "provisioners": ["..."], "push": ["..."], "post-processors": [ { "type": "atlas", "artifact": "ATLAS_USERNAME/packer-tutorial", "artifact_type": "amazon.ami" } ] }` +``` {.javascript} +{ + "variables": ["..."], + "builders": ["..."], + "provisioners": ["..."], + "push": ["..."], + "post-processors": [{ + "type": "atlas", + "artifact": "ATLAS_USERNAME/packer-tutorial", + "artifact_type": "amazon.ami" + }] +} +``` Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! 
When the build From 3c517a65c3c0b1f995f12ab9c96ad30c1527fb8f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 21:16:11 -0700 Subject: [PATCH 060/100] Autoreformat --- website/source/docs/builders/openstack.html.markdown | 4 ++-- .../source/docs/provisioners/salt-masterless.html.markdown | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index 56db25474..12a1ca882 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -100,8 +100,8 @@ builder. Rackconnect to assign the machine an IP address before connecting via SSH. Defaults to false. -- `metadata` (object of key/value strings) - Glance metadata that will be applied - to the image. +- `metadata` (object of key/value strings) - Glance metadata that will be + applied to the image. ## Basic Example: Rackspace public cloud diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 1eeabaf14..adb1c4bb3 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -54,8 +54,8 @@ Optional: tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). This will be uploaded to the `remote_state_tree` on the remote. -- `minion_config` (string) - The path to your local [minion - config file](http://docs.saltstack.com/ref/configuration/minion.html). This will be +- `minion_config` (string) - The path to your local [minion config + file](http://docs.saltstack.com/ref/configuration/minion.html). This will be uploaded to the `/etc/salt` on the remote. 
- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt From 98b9d22b68053f751e7ffedbdc85499537772a67 Mon Sep 17 00:00:00 2001 From: Florian Noeding Date: Fri, 31 Jul 2015 15:34:25 +0200 Subject: [PATCH 061/100] amazon builder: only fetch password for winrm --- builder/amazon/common/step_get_password.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index 08a9c7b66..fec33891f 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -26,11 +26,10 @@ type StepGetPassword struct { func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - image := state.Get("source_image").(*ec2.Image) - // Skip if we're not Windows... - if image.Platform == nil || *image.Platform != "windows" { - log.Printf("[INFO] Not Windows, skipping get password...") + // Skip if we're not using winrm + if s.Comm.Type != "winrm" { + log.Printf("[INFO] Not using winrm communicator, skipping get password...") return multistep.ActionContinue } From 263641c53799c20c7f9b23bcdd5097c97ca22194 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 1 Aug 2015 15:09:59 -0700 Subject: [PATCH 062/100] Fix case for ethernet.generatedAddress property lookup in VMX --- builder/vmware/common/ssh.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 86e184bb5..9db075a71 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -39,7 +39,7 @@ func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { var ok bool macAddress := "" if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { - if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { + if macAddress, ok = vmxData["ethernet0.generatedAddress"]; !ok || 
macAddress == "" { return "", errors.New("couldn't find MAC address in VMX") } } From e0d46685ea6f0ff545aa561eac858ae7451eaa40 Mon Sep 17 00:00:00 2001 From: Dane Elwell Date: Mon, 3 Aug 2015 17:53:33 +0100 Subject: [PATCH 063/100] Document remote_port option --- website/source/docs/builders/vmware-iso.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 8ac3a9fd3..35c1fb05b 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -369,6 +369,8 @@ fill in the required `remote_*` configurations: Additionally, there are some optional configurations that you'll likely have to modify as well: +* `remote_port` - The SSH port of the remote machine + * `remote_datastore` - The path to the datastore where the VM will be stored on the ESXi machine. From db1a781b6ef493754fc17e62eca2724e2a7efc48 Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Mon, 3 Aug 2015 13:02:01 -0400 Subject: [PATCH 064/100] Rename .html.md files to .html.markdown Fixes #2546. Make files under website/source/docs/provisioners conform to standards for rest of docs. 
--- .../{shell-local.html.md => shell-local.html.markdown} | 0 .../{windows-restart.html.md => windows-restart.html.markdown} | 0 .../{windows-shell.html.md => windows-shell.html.markdown} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename website/source/docs/provisioners/{shell-local.html.md => shell-local.html.markdown} (100%) rename website/source/docs/provisioners/{windows-restart.html.md => windows-restart.html.markdown} (100%) rename website/source/docs/provisioners/{windows-shell.html.md => windows-shell.html.markdown} (100%) diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.markdown similarity index 100% rename from website/source/docs/provisioners/shell-local.html.md rename to website/source/docs/provisioners/shell-local.html.markdown diff --git a/website/source/docs/provisioners/windows-restart.html.md b/website/source/docs/provisioners/windows-restart.html.markdown similarity index 100% rename from website/source/docs/provisioners/windows-restart.html.md rename to website/source/docs/provisioners/windows-restart.html.markdown diff --git a/website/source/docs/provisioners/windows-shell.html.md b/website/source/docs/provisioners/windows-shell.html.markdown similarity index 100% rename from website/source/docs/provisioners/windows-shell.html.md rename to website/source/docs/provisioners/windows-shell.html.markdown From e73ec1f70daf0284cca4c189633be8e37b8c2981 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 11:16:17 -0700 Subject: [PATCH 065/100] Use go vet instead of go tool vet, and actually run it with make --- Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 2fb386399..8ff6560a5 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,6 @@ TEST?=./... 
-VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \ - -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -default: test +default: test vet dev bin: @sh -c "$(CURDIR)/scripts/build.sh" @@ -41,10 +39,10 @@ updatedeps: | xargs go get -f -u -v vet: - @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ + @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ go get golang.org/x/tools/cmd/vet; \ fi - @go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \ + @go vet ./... ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ From 07eff4c014d97ce2f0f108dadd18b2c411bf33bf Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 11:32:54 -0700 Subject: [PATCH 066/100] Reformat docs --- .../provisioners/shell-local.html.markdown | 37 ++++---- .../windows-restart.html.markdown | 33 +++---- .../provisioners/windows-shell.html.markdown | 86 +++++++++---------- 3 files changed, 79 insertions(+), 77 deletions(-) diff --git a/website/source/docs/provisioners/shell-local.html.markdown b/website/source/docs/provisioners/shell-local.html.markdown index b986cd5ef..198e31272 100644 --- a/website/source/docs/provisioners/shell-local.html.markdown +++ b/website/source/docs/provisioners/shell-local.html.markdown @@ -1,23 +1,25 @@ --- -layout: "docs" -page_title: "Local Shell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. ---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: Local Shell Provisioner +... 
# Local Shell Provisioner Type: `shell-local` -The local shell provisioner executes a local shell script on the machine -running Packer. The [remote shell](/docs/provisioners/shell.html) -provisioner executes shell scripts on a remote machine. +The local shell provisioner executes a local shell script on the machine running +Packer. The [remote shell](/docs/provisioners/shell.html) provisioner executes +shell scripts on a remote machine. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "shell-local", "command": "echo foo" @@ -31,15 +33,14 @@ required element is "command". Required: -* `command` (string) - The command to execute. This will be executed - within the context of a shell as specified by `execute_command`. +- `command` (string) - The command to execute. This will be executed within + the context of a shell as specified by `execute_command`. Optional parameters: -* `execute_command` (array of strings) - The command to use to execute the script. - By default this is `["/bin/sh", "-c", "{{.Command}"]`. The value is an array - of arguments executed directly by the OS. - The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). - The only available variable is `Command` which is the command to execute. - +- `execute_command` (array of strings) - The command to use to execute + the script. By default this is `["/bin/sh", "-c", "{{.Command}"]`. The value + is an array of arguments executed directly by the OS. The value of this is + treated as [configuration + template](/docs/templates/configuration-templates.html). The only available + variable is `Command` which is the command to execute. 
diff --git a/website/source/docs/provisioners/windows-restart.html.markdown b/website/source/docs/provisioners/windows-restart.html.markdown index a1b65cae1..05377ca23 100644 --- a/website/source/docs/provisioners/windows-restart.html.markdown +++ b/website/source/docs/provisioners/windows-restart.html.markdown @@ -1,16 +1,17 @@ --- -layout: "docs" -page_title: "Windows Restart Provisioner" -description: |- - The Windows restart provisioner restarts a Windows machine and waits for it to come back up. ---- +description: | + The Windows restart provisioner restarts a Windows machine and waits for it to + come back up. +layout: docs +page_title: Windows Restart Provisioner +... # Windows Restart Provisioner Type: `windows-restart` -The Windows restart provisioner initiates a reboot on a Windows machine -and waits for the machine to come back online. +The Windows restart provisioner initiates a reboot on a Windows machine and +waits for the machine to come back online. The Windows provisioning process often requires multiple reboots, and this provisioner helps to ease that process. @@ -19,7 +20,7 @@ provisioner helps to ease that process. The example below is fully functional. -```javascript +``` {.javascript} { "type": "windows-restart" } @@ -31,13 +32,13 @@ The reference of available configuration options is listed below. Optional parameters: -* `restart_command` (string) - The command to execute to initiate the - restart. By default this is `shutdown /r /c "packer restart" /t 5 && net stop winrm`. - A key action of this is to stop WinRM so that Packer can detect it - is rebooting. +- `restart_command` (string) - The command to execute to initiate the restart. + By default this is `shutdown /r /c "packer restart" /t 5 && net stop winrm`. + A key action of this is to stop WinRM so that Packer can detect it + is rebooting. -* `restart_check_command` (string) - A command to execute to check if the - restart succeeded. This will be done in a loop. 
+- `restart_check_command` (string) - A command to execute to check if the + restart succeeded. This will be done in a loop. -* `restart_timeout` (string) - The timeout to wait for the restart. - By default this is 5 minutes. Example value: "5m" +- `restart_timeout` (string) - The timeout to wait for the restart. By default + this is 5 minutes. Example value: "5m" diff --git a/website/source/docs/provisioners/windows-shell.html.markdown b/website/source/docs/provisioners/windows-shell.html.markdown index c758a5ebd..38f10fcef 100644 --- a/website/source/docs/provisioners/windows-shell.html.markdown +++ b/website/source/docs/provisioners/windows-shell.html.markdown @@ -1,22 +1,23 @@ --- -layout: "docs" -page_title: "Windows Shell Provisioner" -description: |- - The windows-shell Packer provisioner runs commands on Windows using the cmd shell. ---- +description: | + The windows-shell Packer provisioner runs commands on Windows using the cmd + shell. +layout: docs +page_title: Windows Shell Provisioner +... # Windows Shell Provisioner Type: `windows-shell` -The windows-shell Packer provisioner runs commands on a Windows machine -using `cmd`. It assumes it is running over WinRM. +The windows-shell Packer provisioner runs commands on a Windows machine using +`cmd`. It assumes it is running over WinRM. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "windows-shell", "inline": ["dir c:\\"] @@ -28,48 +29,47 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. -Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. 
This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next + and so on. Inline scripts are the easiest way to pull off simple tasks + within the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in + the machine. This path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. The scripts - will be uploaded and executed in the order specified. Each script is executed - in isolation, so state such as variables from one script won't carry on to - the next. +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be uploaded and executed in the order specified. Each script is + executed in isolation, so state such as variables from one script won't + carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. 
-* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `{{ .Vars }}"{{ .Path }}"`. The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). - There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `{{ .Vars }}"{{ .Path }}"`. The value of this is treated as + [configuration template](/docs/templates/configuration-templates.html). + There are two available variables: `Path`, which is the path to the script + to run, and `Vars`, which is the list of `environment_vars`, if configured. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. 
Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. From 8d6719e71fe5fb6b9a31c031c2e5b5849c8b8030 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 16:34:24 -0700 Subject: [PATCH 067/100] Add failing test for compress interpolation --- .../compress/post-processor_test.go | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index db23cf3b1..fec3b7a72 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -150,6 +150,35 @@ func TestCompressOptions(t *testing.T) { } } +func TestCompressInterpolation(t *testing.T) { + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "{{ .BuildName }}.gz" + } + ] + } + ` + + artifact := testArchive(t, config) + defer artifact.Destroy() + + filename := "file.gz" + archive, err := os.Open(filename) + if err != nil { + t.Fatalf("Unable to read %s: %s", filename, err) + } + + gzipReader, _ := gzip.NewReader(archive) + data, _ := ioutil.ReadAll(gzipReader) + + if string(data) != expectedFileContents { + t.Errorf("Expected:\n%s\nFound:\n%s\n", expectedFileContents, data) + } +} + // Test Helpers func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { From 4ef3baa3eedfc171cd3d66ab7542030693084d24 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:30:57 -0700 Subject: [PATCH 068/100] Update test to include some interpolation configs --- post-processor/compress/post-processor_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git 
a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index fec3b7a72..ea1d973eb 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -156,7 +156,7 @@ func TestCompressInterpolation(t *testing.T) { "post-processors": [ { "type": "compress", - "output": "{{ .BuildName }}.gz" + "output": "{{ build_name}}-{{ .BuildName }}-{{.BuilderType}}.gz" } ] } @@ -165,7 +165,9 @@ func TestCompressInterpolation(t *testing.T) { artifact := testArchive(t, config) defer artifact.Destroy() - filename := "file.gz" + // You can interpolate using the .BuildName variable or build_name global + // function. We'll check both. + filename := "chocolate-vanilla-file.gz" archive, err := os.Open(filename) if err != nil { t.Fatalf("Unable to read %s: %s", filename, err) @@ -230,6 +232,13 @@ func testArchive(t *testing.T, config string) packer.Artifact { compressor := PostProcessor{} compressor.Configure(tpl.PostProcessors[0][0].Config) + + // I get the feeling these should be automatically available somewhere, but + // some of the post-processors construct this manually. 
+ compressor.config.ctx.BuildName = "chocolate" + compressor.config.PackerBuildName = "vanilla" + compressor.config.PackerBuilderType = "file" + artifactOut, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to compress artifact: %s", err) From 8f2a9de28e24d76aef02a9863fcf30d7e8623b25 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:46:14 -0700 Subject: [PATCH 069/100] Updated documentation explaining how to use variables in compress post-processor filenames --- .../post-processors/compress.html.markdown | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index ad78a9315..9236dd0e7 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -15,10 +15,11 @@ VMware or VirtualBox) and compresses the artifact into a single archive. ## Configuration -### Required: +### Optional: -You must specify the output filename. The archive format is derived from the -filename. +By default, packer will build archives in `.tar.gz` format with the following +filename: `packer_{{.BuildName}}_{{.BuilderType}}`. If you want to change this +you will need to specify the `output` option. - `output` (string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a @@ -26,13 +27,9 @@ filename. detected packer defaults to `.tar.gz` behavior but will not change the filename. -If you are executing multiple builders in parallel you should make sure `output` -is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. - -### Optional: - -If you want more control over how the archive is created you can specify the -following settings: + You can use `{{.BuildName}}` and ``{{.BuilderType}}` in your output path. 
+ If you are executing multiple builders in parallel you should make sure + `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. - `compression_level` (integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher @@ -61,14 +58,14 @@ configuration: ``` {.json} { "type": "compress", - "output": "archive.zip" + "output": "{{.BuildName}}_bundle.zip" } ``` ``` {.json} { "type": "compress", - "output": "archive.gz", + "output": "log_{{.BuildName}}.gz", "compression": 9 } ``` From fbb24d4acfa7746a416d48a0556043807a2130e5 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:49:41 -0700 Subject: [PATCH 070/100] Changed interpolation logic so .BuildName can be used in the output config option --- post-processor/compress/post-processor.go | 44 +++++++++++------------ 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index bb6ce27bf..b95b27bde 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -55,9 +55,12 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { Interpolate: true, InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{}, + Exclude: []string{"output"}, }, }, raws...) 
+ if err != nil { + return err + } errs := new(packer.MultiError) @@ -67,16 +70,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } if p.config.OutputPath == "" { - p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" - } - - if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing target template: %s", err)) - } - - templates := map[string]*string{ - "output": &p.config.OutputPath, + p.config.OutputPath = "packer_{{.BuildName}}_{{.BuilderType}}" } if p.config.CompressionLevel > pgzip.BestCompression { @@ -89,17 +83,9 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.CompressionLevel = pgzip.DefaultCompression } - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = interpolate.Render(p.config.OutputPath, &p.config.ctx) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } + if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error parsing target template: %s", err)) } p.config.detectFromFilename() @@ -113,7 +99,19 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - target := p.config.OutputPath + // These are extra variables that will be made available for interpolation. 
+ p.config.ctx.Data = map[string]string{ + "BuildName": p.config.PackerBuildName, + "BuilderType": p.config.PackerBuilderType, + } + + target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx) + if err != nil { + return nil, false, fmt.Errorf("Error interpolating output value: %s", err) + } else { + fmt.Println(target) + } + keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} From 1c956ff406c8d03c88cd6f4af90b5e09c5256463 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 20:11:53 -0700 Subject: [PATCH 071/100] Removed errant backtick --- website/source/docs/post-processors/compress.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 9236dd0e7..3834ffc72 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -27,7 +27,7 @@ you will need to specify the `output` option. detected packer defaults to `.tar.gz` behavior but will not change the filename. - You can use `{{.BuildName}}` and ``{{.BuilderType}}` in your output path. + You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. If you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. 
From e7ab9fb3c0ad801e2e9bd38f8230ba543f8c4554 Mon Sep 17 00:00:00 2001 From: Bryce Fisher-Fleig Date: Wed, 5 Aug 2015 15:28:20 -0700 Subject: [PATCH 072/100] Add missing option --- website/source/docs/builders/virtualbox-iso.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 7df4975dc..61e5d3e16 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -72,6 +72,9 @@ builder. - `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed. +- `ssh_password` (string) - The password to use to SSH into the machine once + the OS is installed. + ### Optional: - `boot_command` (array of strings) - This is an array of commands to type From abb67fdd7964385c23c9a57349bc07158f25798d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 5 Aug 2015 19:41:29 -0700 Subject: [PATCH 073/100] Fix govet issues --- builder/amazon/common/artifact.go | 2 +- builder/amazon/common/state.go | 2 -- builder/openstack/server.go | 2 -- packer/rpc/server.go | 8 ++++---- provisioner/windows-restart/provisioner.go | 1 - 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index aba2ffde4..4082b2abc 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -88,7 +88,7 @@ func (a *Artifact) Destroy() error { if len(errors) == 1 { return errors[0] } else { - return &packer.MultiError{errors} + return &packer.MultiError{Errors: errors} } } diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 075ce8ef7..3b40a48d1 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -181,8 +181,6 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) { 
time.Sleep(time.Duration(sleepSeconds) * time.Second) } - - return } func isTransientNetworkError(err error) bool { diff --git a/builder/openstack/server.go b/builder/openstack/server.go index 482657c03..0897821a8 100644 --- a/builder/openstack/server.go +++ b/builder/openstack/server.go @@ -92,6 +92,4 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) { log.Printf("Waiting for state to become: %s currently %s (%d%%)", conf.Target, currentState, currentProgress) time.Sleep(2 * time.Second) } - - return } diff --git a/packer/rpc/server.go b/packer/rpc/server.go index b6d17dacf..ceb77a8d3 100644 --- a/packer/rpc/server.go +++ b/packer/rpc/server.go @@ -1,13 +1,13 @@ package rpc import ( - "fmt" - "github.com/hashicorp/go-msgpack/codec" - "github.com/mitchellh/packer/packer" "io" "log" "net/rpc" "sync/atomic" + + "github.com/hashicorp/go-msgpack/codec" + "github.com/mitchellh/packer/packer" ) var endpointId uint64 @@ -149,7 +149,7 @@ func (s *Server) Serve() { func registerComponent(server *rpc.Server, name string, rcvr interface{}, id bool) string { endpoint := name if id { - fmt.Sprintf("%s.%d", endpoint, atomic.AddUint64(&endpointId, 1)) + log.Printf("%s.%d", endpoint, atomic.AddUint64(&endpointId, 1)) } server.RegisterName(endpoint, rcvr) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 4b6af609e..2e4b7c371 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -134,7 +134,6 @@ WaitLoop: case <-p.cancel: close(waitDone) return fmt.Errorf("Interrupt detected, quitting waiting for machine to restart") - break WaitLoop } } From 94c12c9afcedc128f0aa6eacd25575f684fad2c2 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Thu, 6 Aug 2015 09:27:38 -0700 Subject: [PATCH 074/100] Fix 'ephemeral' misspelling. 
--- website/source/docs/other/debugging.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index 8c8012bc8..efe01a0cf 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -20,9 +20,9 @@ usually will stop between each step, waiting for keyboard input before continuing. This will allow you to inspect state and so on. In debug mode once the remote instance is instantiated, Packer will emit to the -current directory an emphemeral private ssh key as a .pem file. Using that you +current directory an ephemeral private ssh key as a .pem file. Using that you can `ssh -i ` into the remote build instance and see what is going on -for debugging. The emphemeral key will be deleted at the end of the packer run +for debugging. The ephemeral key will be deleted at the end of the packer run during cleanup. ### Windows From f40ccd55adaa78897fb9daa8e9463c6fac37815a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 11:02:57 -0700 Subject: [PATCH 075/100] Added debug output to the makefile so I can see which commit travis is building --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 8ff6560a5..0ed426520 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,7 @@ generate: go generate ./... test: + @echo "Running tests on:"; git symbolic-ref HEAD; git rev-parse HEAD go test $(TEST) $(TESTARGS) -timeout=10s @$(MAKE) vet @@ -29,6 +30,7 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: + @echo "Updating deps on:"; git symbolic-ref HEAD; git rev-parse HEAD go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go list ./... 
\ @@ -37,8 +39,10 @@ updatedeps: | grep -v '/internal/' \ | sort -u \ | xargs go get -f -u -v + @echo "Finished updating deps, now on:"; git symbolic-ref HEAD; git rev-parse HEAD vet: + @echo "Running go vet on:"; git symbolic-ref HEAD; git rev-parse HEAD @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ go get golang.org/x/tools/cmd/vet; \ fi From af2fa705bf441699cc12accc25ef3801afc55cd9 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 12:24:13 -0700 Subject: [PATCH 076/100] Added go vet and git rev-parse head to appveyor so we can see what we're actually building / testing --- appveyor.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/appveyor.yml b/appveyor.yml index 202456f58..c5d317da6 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -31,6 +31,8 @@ install: build_script: - go test -v ./... + - go vet ./... + - git rev-parse HEAD test: off From 211817c78e4b450b872d3095a778d580a48e5baa Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 16:52:30 -0700 Subject: [PATCH 077/100] Fix formatting for code block in docs --- .../docs/builders/virtualbox-ovf.html.markdown | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index b9b2de033..0800b14bc 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -19,13 +19,11 @@ image). 
When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this: -==> virtualbox-ovf: Progress state: VBOX\_E\_FILE\_ERROR ==> -virtualbox-ovf: VBoxManage: error: Appliance read failed ==> virtualbox-ovf: -VBoxManage: error: Error reading "source.ova": element "Section" has no "type" -attribute, line 21 ==> virtualbox-ovf: VBoxManage: error: Details: code -VBOX\_E\_FILE\_ERROR (0x80bb0004), component Appliance, interface IAppliance -==> virtualbox-ovf: VBoxManage: error: Context: "int -handleImportAppliance(HandlerArg\*)" at line 304 of file VBoxManageAppliance.cpp + ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR + ==> virtualbox-ovf: VBoxManage: error: Appliance read failed + ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 + ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance + ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp The builder builds a virtual machine by importing an existing OVF or OVA file. 
It then boots this image, runs provisioners on this new VM, and exports that VM From 6dd0a21c89ff936ff565a2d1e8cee972533ab489 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sun, 26 Jul 2015 16:22:46 -0700 Subject: [PATCH 078/100] Added an artifice post-processor which allows you to override artifacts in a post-processor chain --- plugin/post-processor-artifice/main.go | 15 +++++ post-processor/artifice/artifact.go | 56 +++++++++++++++++ post-processor/artifice/post-processor.go | 60 +++++++++++++++++++ .../artifice/post-processor_test.go | 1 + 4 files changed, 132 insertions(+) create mode 100644 plugin/post-processor-artifice/main.go create mode 100644 post-processor/artifice/artifact.go create mode 100644 post-processor/artifice/post-processor.go create mode 100644 post-processor/artifice/post-processor_test.go diff --git a/plugin/post-processor-artifice/main.go b/plugin/post-processor-artifice/main.go new file mode 100644 index 000000000..c503e1572 --- /dev/null +++ b/plugin/post-processor-artifice/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/post-processor/artifice" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterPostProcessor(new(artifice.PostProcessor)) + server.Serve() +} diff --git a/post-processor/artifice/artifact.go b/post-processor/artifice/artifact.go new file mode 100644 index 000000000..cb344b8e2 --- /dev/null +++ b/post-processor/artifice/artifact.go @@ -0,0 +1,56 @@ +package artifice + +import ( + "fmt" + "os" + "strings" +) + +const BuilderId = "packer.post-processor.artifice" + +type Artifact struct { + files []string +} + +func NewArtifact(files []string) (*Artifact, error) { + for _, f := range files { + if _, err := os.Stat(f); err != nil { + return nil, err + } + } + artifact := &Artifact{ + files: files, + } + return artifact, nil +} + +func (a *Artifact) BuilderId() string { + return BuilderId +} 
+ +func (a *Artifact) Files() []string { + return a.files +} + +func (a *Artifact) Id() string { + return "" +} + +func (a *Artifact) String() string { + files := strings.Join(a.files, ", ") + return fmt.Sprintf("Created artifact from files: %s", files) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + for _, f := range a.files { + err := os.RemoveAll(f) + if err != nil { + return err + } + } + return nil +} diff --git a/post-processor/artifice/post-processor.go b/post-processor/artifice/post-processor.go new file mode 100644 index 000000000..ff33184de --- /dev/null +++ b/post-processor/artifice/post-processor.go @@ -0,0 +1,60 @@ +package artifice + +import ( + "fmt" + "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +// The artifact-override post-processor allows you to specify arbitrary files as +// artifacts. These will override any other artifacts created by the builder. +// This allows you to use a builder and provisioner to create some file, such as +// a compiled binary or tarball, extract it from the builder (VM or container) +// and then save that binary or tarball and throw away the builder. + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Files []string `mapstructure:"files"` + Keep bool `mapstructure:"keep_input_artifact"` + + ctx interpolate.Context +} + +type PostProcessor struct { + config Config +} + +func (p *PostProcessor) Configure(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &p.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
+ if err != nil { + return err + } + + if len(p.config.Files) == 0 { + return fmt.Errorf("No files specified in artifice configuration") + } + + return nil +} + +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + if len(artifact.Files()) > 0 { + ui.Say(fmt.Sprintf("Discarding artifact files: %s", strings.Join(artifact.Files(), ", "))) + } + + artifact, err := NewArtifact(p.config.Files) + ui.Say(fmt.Sprintf("Using these artifact files: %s", strings.Join(artifact.Files(), ", "))) + + return artifact, true, err +} diff --git a/post-processor/artifice/post-processor_test.go b/post-processor/artifice/post-processor_test.go new file mode 100644 index 000000000..7e087e3e8 --- /dev/null +++ b/post-processor/artifice/post-processor_test.go @@ -0,0 +1 @@ +package artifice From 16d7e7542ae8da34f10b5ffb9870d8465e84884e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 7 Aug 2015 20:10:17 -0700 Subject: [PATCH 079/100] Added docs for artifice --- .../post-processors/artifice.html.markdown | 147 ++++++++++++++++++ website/source/layouts/docs.erb | 1 + 2 files changed, 148 insertions(+) create mode 100644 website/source/docs/post-processors/artifice.html.markdown diff --git a/website/source/docs/post-processors/artifice.html.markdown b/website/source/docs/post-processors/artifice.html.markdown new file mode 100644 index 000000000..28255e836 --- /dev/null +++ b/website/source/docs/post-processors/artifice.html.markdown @@ -0,0 +1,147 @@ +--- +description: | + The Atlas post-processor for Packer receives an artifact from a Packer build and + uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version + and distribute them in a simple way. +layout: docs +page_title: 'Atlas Post-Processor' +... + +# Artifice Post-Processor + +\~> This is a beta feature, and may change significantly before it is +finalized. 
Please open a [GitHub issue to provide +feedback](https://github.com/mitchellh/packer/issues). + +Type: `artifice` + +The artifice post-processor overrides the artifact list from an upstream builder +or post-processor. All downstream post-processors will see the new artifacts you +specify. The primary use-case is to build artifacts inside a packer builder -- +for example, spinning up an EC2 instance to build a docker container -- and then +extracting the docker container and throwing away the EC2 instance. + +After overriding the artifact with artifice, you can use it with other +post-processors like +[compress](https://packer.io/docs/post-processors/compress.html), +[docker-push](https://packer.io/docs/post-processors/docker-push.html), +[Atlas](https://packer.io/docs/post-processors/atlas.html), or a third-party +post-processor. + +Artifice allows you to use the familiar packer workflow to create a fresh, +stateless build environment for each build on the infrastructure of your +choosing. You can use this to build just about anything: buildpacks, containers, +jars, binaries, tarballs, msi installers, and more. + +## Workflow + +Artifice helps you tie together a few other packer features: + +- A builder, which spins up a VM (or container) to build your artifact +- A provisioner, which performs the steps to create your artifact +- A file provisioner, which downloads the artifact from the VM +- The artifice post-processor, which identifies which files have been + downloaded from the VM +- Additional post-processors, which push the artifact to Atlas, Docker + hub, etc. + +You will want to perform as much work as possible inside the VM. Ideally +the only other post-processor you need after artifice is one that uploads your +artifact to the appropriate repository. + +## Configuration + +The configuration allows you to specify which files comprise your artifact. + +### Required: + +- `files` (array of strings) - A list of files that comprise your artifact. 
+ These files must exist on your local disk after the provisioning phase of + packer is complete. These will replace any of the builder's original + artifacts (such as a VM snapshot). + +### Example Configuration + +This minimal example: + +1. Spins up a cloned VMware virtual machine +2. Installs a [consul](https://consul.io/) release +3. Downloads the consul binary +4. Packages it into a `.tar.gz` file +5. Uploads it to Atlas. + +VMX is a fast way to build and test locally, but you can easily substitute another builder. + +``` {.javascript} +{ + "builders": [ + { + "type": "vmware-vmx", + "source_path": "/opt/ubuntu-1404-vmware.vmx", + "ssh_username": "vagrant", + "ssh_password": "vagrant", + "shutdown_command": "sudo shutdown -h now", + "headless":"true", + "skip_compaction":"true" + } + ], + "provisioners": [ + { + "type": "shell", + "inline": [ + "sudo apt-get install -y python-pip", + "sudo pip install ifs", + "sudo ifs install consul --version=0.5.2" + ] + }, + { + "type": "file", + "source": "/usr/local/bin/consul", + "destination": "consul", + "direction": "download" + } + ], + "post-processors": [ + [ + { + "type": "artifice", + "files": ["consul"] + }, + { + "type": "compress", + "output": "consul-0.5.2.tar.gz" + }, + { + "type":"atlas", + "artifact": "hashicorp/consul", + "artifact_type": "archive" + } + ] + ] +} +``` + +**Notice that there are two sets of square brackets in the post-processor +section.** This creates a post-processor chain, where the output of the +proceeding artifact is passed to subsequent post-processors. If you use only one +set of square braces the post-processors will run individually against the build +artifact (the vmx file in this case) and it will not have the desired result. + + "post-processors": [ + [ <--- Start post-processor chain + { + "type": "artifice", + "files": ["consul"] + }, + { + "type": "atlas", + ... 
+ } + ], <--- End post-processor chain + { + "type":"compress" <-- Standalone post-processor + } + ] + +You can create multiple post-processor chains to handle multiple builders (for example, +building linux and windows binaries during the same build). diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 2b8bb8810..0bba9799c 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -69,6 +69,7 @@
  • Post-Processors

  • +
  • Artifice
  • Atlas
  • compress
  • docker-import
  • From 8484c2e2a05c5a9a22c61d0cdb0df09f612b5691 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 8 Aug 2015 00:51:01 -0700 Subject: [PATCH 080/100] Prepare 0.8.3 --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 84958092f..59a306403 100644 --- a/version.go +++ b/version.go @@ -9,4 +9,4 @@ const Version = "0.8.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From 441695446115309b8a09dc80c94d8ded45d38a9a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 10:25:19 -0700 Subject: [PATCH 081/100] Corrected the meta text on the artifice page. --- .../post-processors/artifice.html.markdown | 29 ++++++++++--------- .../post-processors/compress.html.markdown | 4 +-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/website/source/docs/post-processors/artifice.html.markdown b/website/source/docs/post-processors/artifice.html.markdown index 28255e836..2ee9abc85 100644 --- a/website/source/docs/post-processors/artifice.html.markdown +++ b/website/source/docs/post-processors/artifice.html.markdown @@ -1,8 +1,10 @@ --- description: | - The Atlas post-processor for Packer receives an artifact from a Packer build and - uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version - and distribute them in a simple way. + The artifice post-processor overrides the artifact list from an upstream builder + or post-processor. All downstream post-processors will see the new artifacts you + specify. The primary use-case is to build artifacts inside a packer builder -- + for example, spinning up an EC2 instance to build a docker container -- and then + extracting the docker container and throwing away the EC2 instance. 
layout: docs page_title: 'Atlas Post-Processor' ... @@ -45,8 +47,8 @@ Artifice helps you tie together a few other packer features: - Additional post-processors, which push the artifact to Atlas, Docker hub, etc. -You will want to perform as much work as possible inside the VM. Ideally -the only other post-processor you need after artifice is one that uploads your +You will want to perform as much work as possible inside the VM. Ideally the +only other post-processor you need after artifice is one that uploads your artifact to the appropriate repository. ## Configuration @@ -64,13 +66,14 @@ The configuration allows you to specify which files comprise your artifact. This minimal example: -1. Spins up a cloned VMware virtual machine -2. Installs a [consul](https://consul.io/) release -3. Downloads the consul binary -4. Packages it into a `.tar.gz` file -5. Uploads it to Atlas. +1. Spins up a cloned VMware virtual machine +2. Installs a [consul](https://consul.io/) release +3. Downloads the consul binary +4. Packages it into a `.tar.gz` file +5. Uploads it to Atlas. -VMX is a fast way to build and test locally, but you can easily substitute another builder. +VMX is a fast way to build and test locally, but you can easily substitute +another builder. ``` {.javascript} { @@ -143,5 +146,5 @@ artifact (the vmx file in this case) and it will not have the desired result. } ] -You can create multiple post-processor chains to handle multiple builders (for example, -building linux and windows binaries during the same build). +You can create multiple post-processor chains to handle multiple builders (for +example, building linux and windows binaries during the same build). 
diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 3834ffc72..373230d44 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -27,8 +27,8 @@ you will need to specify the `output` option. detected packer defaults to `.tar.gz` behavior but will not change the filename. - You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. - If you are executing multiple builders in parallel you should make sure + You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. If + you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. - `compression_level` (integer) - Specify the compression level, for From 3a6cac97dd99aecbda7a641b71d1066aa40eb756 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 12:45:20 -0700 Subject: [PATCH 082/100] Added v0.8.3 changelog --- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 172589cd4..55514c156 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,32 @@ +## 0.8.3 (Aug 8, 2015) + +FEATURES: + + * **[Beta] Artifice post-processor:** Override packer artifacts during post- + processing. This allows you to **extract artifacts from a packer builder** + and use them with other post-processors like compress, docker, and Atlas. + +IMPROVEMENTS: + + * Many docs have been updated and corrected; big thanks to our contributors! 
+ * builder/openstack: Add debug logging for IP addresses used for SSH [GH-2513] + * builder/openstack: Add option to use existing SSH keypair [GH-2512] + * builder/openstack: Add support for Glance metadata [GH-2434] + * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for + an exclusive connection [GH-2522] + * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] + +BUG FIXES: + + * builder/openstack: track new IP address discovered during RackConnect [GH-2514] + * post-processor/atlas: atlas_url configuration option works now [GH-2478] + * post-processor/compress: Now supports interpolation in output config [GH-2414] + * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] + * provisioner/salt-masterless: Clarify error messages when we can't create or + write to the temp directory [GH-2518] + * provisioner/salt-masterless: Copy state even if /srv/salt exists already [GH-1699] + * provisioner/salt-masterless: Make sure /etc/salt exists before writing to it [GH-2520] + ## 0.8.2 (July 17, 2015) IMPROVEMENTS: From f1eb95dbe04f9c6f3b7fe28d08a45211c8d6e17f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 13:30:31 -0700 Subject: [PATCH 083/100] Remove extra emphasis --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55514c156..1b04e6722 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,8 @@ FEATURES: - * **[Beta] Artifice post-processor:** Override packer artifacts during post- - processing. This allows you to **extract artifacts from a packer builder** + * **[Beta]** Artifice post-processor: Override packer artifacts during post- + processing. This allows you to extract artifacts from a packer builder and use them with other post-processors like compress, docker, and Atlas. 
IMPROVEMENTS: From 4cc443da8ecc3a20801f4b8cdbe19e8302f66495 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 13:59:56 -0700 Subject: [PATCH 084/100] Update use of ec2rolecreds to match upstream --- builder/amazon/common/access_config.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index 88bda0423..dccde08d4 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/mitchellh/packer/template/interpolate" ) @@ -31,7 +32,7 @@ func (c *AccessConfig) Config() (*aws.Config, error) { }}, &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &credentials.EC2RoleProvider{}, + &ec2rolecreds.EC2RoleProvider{}, }) region, err := c.Region() From fce6f86328e79dcb9908a49fb4ab59532d5a2312 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 14:19:20 -0700 Subject: [PATCH 085/100] Updated changelog with some missing changes --- CHANGELOG.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b04e6722..226c23f22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## 0.8.3 (Aug 8, 2015) +BACKWARDS INCOMPATIBILITIES: + + * VMware VMX options are no longer lowercased internally. This is to support + the virtualSSD option which is case-sensitive. See [GH-2309] for details. 
+ FEATURES: * **[Beta]** Artifice post-processor: Override packer artifacts during post- @@ -14,11 +19,17 @@ IMPROVEMENTS: * builder/openstack: Add support for Glance metadata [GH-2434] * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for an exclusive connection [GH-2522] + * builder/vmware: Add support for virtualSSD option [GH-2309] * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] BUG FIXES: - * builder/openstack: track new IP address discovered during RackConnect [GH-2514] + * builder/amazon: Improve instance cleanup by storing id sooner [GH-2404] + * builder/amazon: Only fetch windows password when using WinRM communicator [GH-2538] + * builder/openstack: Support IPv6 SSH address [GH-2450] + * builder/openstack: Track new IP address discovered during RackConnect [GH-2514] + * builder/qemu: Add 100ms delay between VNC key events. [GH-2415] + * builder/vmware: Don't force lowercase all VMX options [GH-2309] * post-processor/atlas: atlas_url configuration option works now [GH-2478] * post-processor/compress: Now supports interpolation in output config [GH-2414] * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] @@ -26,6 +37,8 @@ BUG FIXES: write to the temp directory [GH-2518] * provisioner/salt-masterless: Copy state even if /srv/salt exists already [GH-1699] * provisioner/salt-masterless: Make sure /etc/salt exists before writing to it [GH-2520] + * provisioner/winrm: Connect to the correct port when using NAT with + VirtualBox / VMware [GH-2399] ## 0.8.2 (July 17, 2015) From 313fcaf0ff85bc759bac5cc19c77307d53122e9e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 14:52:34 -0700 Subject: [PATCH 086/100] Revert backwards-compatibility break in VMX option casing PR #2309 introduced case-sensitive options in VMX files. This is to support a case-sensitive option called `virtualSSD`. 
The change made all options case-sensitive, which causes problems with external VMX options provided in user templates. To prevent breakage, this change is being reverted. - Fixes #2574 - Reverts #2542 - Reverts #2309 --- builder/vmware/common/ssh.go | 2 +- builder/vmware/common/step_clean_vmx.go | 9 +++++---- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- builder/vmware/common/step_configure_vmx.go | 2 ++ builder/vmware/common/vmx.go | 20 ++++---------------- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 6 files changed, 27 insertions(+), 36 deletions(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 9db075a71..86e184bb5 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -39,7 +39,7 @@ func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { var ok bool macAddress := "" if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { - if macAddress, ok = vmxData["ethernet0.generatedAddress"]; !ok || macAddress == "" { + if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { return "", errors.New("couldn't find MAC address in VMX") } } diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go index 44bf4c407..e9bc51987 100755 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -2,11 +2,12 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "regexp" "strings" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" ) // This step cleans up the VMX by removing or changing this prior to @@ -51,8 +52,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"deviceType"] = "cdrom-raw" - vmxData[ide+"fileName"] = "auto detect" + vmxData[ide+"devicetype"] = "cdrom-raw" + 
vmxData[ide+"filename"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go index 3ca6a7e23..ea30fb54a 100755 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.fileType", ""}, - {"floppy0.fileName", ""}, + {"floppy0.filetype", ""}, + {"floppy0.filename", ""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.fileName", "auto detect"}, - {"ide0:0.deviceType", "cdrom-raw"}, - {"ide0:1.fileName", "bar"}, + {"ide0:0.filename", "auto detect"}, + {"ide0:0.devicetype", "cdrom-raw"}, + {"ide0:1.filename", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present = "TRUE" -floppy0.fileType = "file" +floppy0.filetype = "file" ` const testVMXISOPath = ` -ide0:0.deviceType = "cdrom-image" -ide0:0.fileName = "foo" -ide0:1.fileName = "bar" +ide0:0.devicetype = "cdrom-image" +ide0:0.filename = "foo" +ide0:1.filename = "bar" foo = "bar" ` diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go index 14c68e76a..401d53055 100755 --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "log" "regexp" + "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -52,6 +53,7 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) + k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go index ab0291807..e7cdb662f 100755 
--- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,7 +25,8 @@ func ParseVMX(contents string) map[string]string { continue } - results[matches[1]] = matches[2] + key := strings.ToLower(matches[1]) + results[key] = matches[2] } return results @@ -42,22 +43,9 @@ func EncodeVMX(contents map[string]string) string { i++ } - // a list of VMX key fragments that should not be wrapped in quotes, - // fragments because multiple disks can use the virtualSSD suffix - noQuotes := []string { - "virtualSSD", - } - sort.Strings(keys) for _, k := range keys { - pat := "%s = \"%s\"\n" - for _, q := range noQuotes { - if strings.Contains(k, q) { - pat = "%s = %s\n" - break; - } - } - buf.WriteString(fmt.Sprintf(pat, k, contents[k])) + buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) } return buf.String() diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go index 1dbae678a..a020e1627 100755 --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName string - if _, ok := vmxData["scsi0:0.fileName"]; ok { - diskName = vmxData["scsi0:0.fileName"] + if _, ok := vmxData["scsi0:0.filename"]; ok { + diskName = vmxData["scsi0:0.filename"] } - if _, ok := vmxData["sata0:0.fileName"]; ok { - diskName = vmxData["sata0:0.fileName"] + if _, ok := vmxData["sata0:0.filename"]; ok { + diskName = vmxData["sata0:0.filename"] } - if _, ok := vmxData["ide0:0.fileName"]; ok { - diskName = vmxData["ide0:0.fileName"] + if _, ok := vmxData["ide0:0.filename"]; ok { + diskName = 
vmxData["ide0:0.filename"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not be found!") From 27e7a02e6281f2c618a0c6db734104e7261de552 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 18:04:47 -0700 Subject: [PATCH 087/100] Replace v0.8.3 changelog with v0.8.4. --- CHANGELOG.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226c23f22..6b9ee3681 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,4 @@ -## 0.8.3 (Aug 8, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * VMware VMX options are no longer lowercased internally. This is to support - the virtualSSD option which is case-sensitive. See [GH-2309] for details. +## 0.8.4 (Aug 10, 2015) FEATURES: @@ -19,7 +14,6 @@ IMPROVEMENTS: * builder/openstack: Add support for Glance metadata [GH-2434] * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for an exclusive connection [GH-2522] - * builder/vmware: Add support for virtualSSD option [GH-2309] * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] BUG FIXES: @@ -29,7 +23,6 @@ BUG FIXES: * builder/openstack: Support IPv6 SSH address [GH-2450] * builder/openstack: Track new IP address discovered during RackConnect [GH-2514] * builder/qemu: Add 100ms delay between VNC key events. 
[GH-2415] - * builder/vmware: Don't force lowercase all VMX options [GH-2309] * post-processor/atlas: atlas_url configuration option works now [GH-2478] * post-processor/compress: Now supports interpolation in output config [GH-2414] * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] From 60bbe850ef0b7fec19eba1929d83e7267ca1572b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 18:30:33 -0700 Subject: [PATCH 088/100] Bump version to v0.8.5 --- CHANGELOG.md | 4 +++- version.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b9ee3681..90b5bed8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.8.4 (Aug 10, 2015) +## 0.8.5 (Aug 10, 2015) FEATURES: @@ -33,6 +33,8 @@ BUG FIXES: * provisioner/winrm: Connect to the correct port when using NAT with VirtualBox / VMware [GH-2399] +Note: 0.8.3 was pulled and 0.8.4 was skipped. + ## 0.8.2 (July 17, 2015) IMPROVEMENTS: diff --git a/version.go b/version.go index 59a306403..1aaa4c8df 100644 --- a/version.go +++ b/version.go @@ -4,7 +4,7 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.3" +const Version = "0.8.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release From 5a6bcdeb7899c6518a60aba700308dbf7b92a587 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 12 Aug 2015 01:34:08 +0200 Subject: [PATCH 089/100] Fix interpolation of {{.Flavor}} in parallels_tools_guest_path. 
Fixes [GH-2543] --- .../step_upload_parallels_tools_test.go | 86 +++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 builder/parallels/common/step_upload_parallels_tools_test.go diff --git a/builder/parallels/common/step_upload_parallels_tools_test.go b/builder/parallels/common/step_upload_parallels_tools_test.go new file mode 100644 index 000000000..0599912a9 --- /dev/null +++ b/builder/parallels/common/step_upload_parallels_tools_test.go @@ -0,0 +1,86 @@ +package common + +import ( + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "testing" +) + +func TestStepUploadParallelsTools_impl(t *testing.T) { + var _ multistep.Step = new(StepUploadParallelsTools) +} + +func TestStepUploadParallelsTools(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "upload" + step.ParallelsToolsGuestPath = "/tmp/prl-lin.iso" + step.ParallelsToolsFlavor = "lin" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadPath != "/tmp/prl-lin.iso" { + t.Fatalf("bad: %#v", comm.UploadPath) + } +} + +func TestStepUploadParallelsTools_interpolate(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "upload" + step.ParallelsToolsGuestPath = "/tmp/prl-{{ .Flavor }}.iso" + step.ParallelsToolsFlavor = "win" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := 
state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadPath != "/tmp/prl-win.iso" { + t.Fatalf("bad: %#v", comm.UploadPath) + } +} + +func TestStepUploadParallelsTools_attach(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "attach" + step.ParallelsToolsGuestPath = "/tmp/prl-lin.iso" + step.ParallelsToolsFlavor = "lin" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadCalled { + t.Fatal("bad") + } +} From d9a0f059262d8199bcd5d4b36f460264be380485 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 17:10:34 -0700 Subject: [PATCH 090/100] Bump version.go to reflect dev status --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index 1aaa4c8df..a32442840 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.5" +const Version = "0.8.6" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "" +const VersionPrerelease = "dev" From dc3c55cf8e7366514018b4c065a40dc18917bd56 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 22:22:52 -0700 Subject: [PATCH 091/100] Implemented downloader for the docker communicator so we can pull files out of a container --- builder/docker/communicator.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 4fcd9b658..31ccc2579 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -194,8 +194,36 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error return nil } +// Download pulls a file out of a container using `docker cp`. We have a source +// path and want to write to an io.Writer, not a file. We use - to make docker +// cp to write to stdout, and then copy the stream to our destination io.Writer. func (c *Communicator) Download(src string, dst io.Writer) error { - panic("not implemented") + + log.Printf("Downloading file from container: %s:%s", c.ContainerId, src) + localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-") + + pipe, err := localCmd.StdoutPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + + err = localCmd.Start() + if err != nil { + return fmt.Errorf("Failed to start download: %s", err) + } + + numBytes, err := io.Copy(dst, pipe) + if err != nil { + return fmt.Errorf("Failed to pipe download: %s", err) + } else { + log.Printf("Copied %d bytes for %s", numBytes, src) + } + + if err = localCmd.Wait(); err != nil { + return fmt.Errorf("Failed to download '%s' from container: %s", src, err) + } + + return nil } // canExec tells us whether `docker exec` is supported From 047382eec9e0cc39e5dbdd9ecd46fb73c7943f91 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 22:30:19 -0700 Subject: [PATCH 092/100] Style tweak 
--- builder/docker/communicator.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 31ccc2579..38126366c 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -207,17 +207,15 @@ func (c *Communicator) Download(src string, dst io.Writer) error { return fmt.Errorf("Failed to open pipe: %s", err) } - err = localCmd.Start() - if err != nil { + if err = localCmd.Start(); err != nil { return fmt.Errorf("Failed to start download: %s", err) } numBytes, err := io.Copy(dst, pipe) if err != nil { return fmt.Errorf("Failed to pipe download: %s", err) - } else { - log.Printf("Copied %d bytes for %s", numBytes, src) } + log.Printf("Copied %d bytes for %s", numBytes, src) if err = localCmd.Wait(); err != nil { return fmt.Errorf("Failed to download '%s' from container: %s", src, err) From da82ff3fd687e4b2c79652a09f382a4ecde5175d Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 12 Aug 2015 10:28:06 +0200 Subject: [PATCH 093/100] Fix interpolation of {{.Flavor}} in parallels_tools_guest_path. (2) Actually fix the error... Fixes [GH-2543] --- builder/parallels/iso/builder.go | 2 +- builder/parallels/pvm/config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 4a75b0b47..6b731544d 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -64,7 +64,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { Exclude: []string{ "boot_command", "prlctl", - "parallel_tools_guest_path", + "parallels_tools_guest_path", }, }, }, raws...) 
diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index f03584bf2..89c3ec1f9 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -41,7 +41,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { Exclude: []string{ "boot_command", "prlctl", - "parallel_tools_guest_path", + "parallels_tools_guest_path", }, }, }, raws...) From 3523ffdce14b6e27b640b959f405fd972ca8a22e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:10 -0700 Subject: [PATCH 094/100] Farewell extra line. You were pretty but out of place. --- builder/docker/communicator.go | 1 - 1 file changed, 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 38126366c..8af54bdfe 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -198,7 +198,6 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error // path and want to write to an io.Writer, not a file. We use - to make docker // cp to write to stdout, and then copy the stream to our destination io.Writer. 
func (c *Communicator) Download(src string, dst io.Writer) error { - log.Printf("Downloading file from container: %s:%s", c.ContainerId, src) localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-") From de9ecd2d62cc0cc38dc66293cd98e3599e6703f5 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:33 -0700 Subject: [PATCH 095/100] Add a test fixture file --- builder/docker/test-fixtures/cake | 1 + 1 file changed, 1 insertion(+) create mode 100644 builder/docker/test-fixtures/cake diff --git a/builder/docker/test-fixtures/cake b/builder/docker/test-fixtures/cake new file mode 100644 index 000000000..63d40b126 --- /dev/null +++ b/builder/docker/test-fixtures/cake @@ -0,0 +1 @@ +chocolate cake is delicious From 62c5e8358d4045e5ee1ba64956e3536a5952bb4d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:56 -0700 Subject: [PATCH 096/100] Added a test for docker upload and download --- builder/docker/communicator_test.go | 117 +++++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index f75a89d96..221356723 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -1,10 +1,125 @@ package docker import ( - "github.com/mitchellh/packer/packer" + "crypto/sha256" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" "testing" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/provisioner/file" + "github.com/mitchellh/packer/template" ) func TestCommunicator_impl(t *testing.T) { var _ packer.Communicator = new(Communicator) } + +func TestUploadDownload(t *testing.T) { + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + tpl, err := template.Parse(strings.NewReader(dockerBuilderConfig)) + if err != nil { + t.Fatalf("Unable to parse config: %s", err) + } + + // Make sure we only run this on linux hosts + if 
os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1") + } + if runtime.GOOS != "linux" { + t.Skip("This test is only supported on linux") + } + cmd := exec.Command("docker", "-v") + cmd.Run() + if !cmd.ProcessState.Success() { + t.Error("docker command not found; please make sure docker is installed") + } + + // Setup the builder + builder := &Builder{} + warnings, err := builder.Prepare(tpl.Builders["docker"].Config) + if err != nil { + t.Fatalf("Error preparing configuration %s", err) + } + if len(warnings) > 0 { + t.Fatal("Encountered configuration warnings; aborting") + } + + // Setup the provisioners + upload := &file.Provisioner{} + err = upload.Prepare(tpl.Provisioners[0].Config) + if err != nil { + t.Fatalf("Error preparing upload: %s", err) + } + download := &file.Provisioner{} + err = download.Prepare(tpl.Provisioners[1].Config) + if err != nil { + t.Fatalf("Error preparing download: %s", err) + } + // Preemptive cleanup + defer os.Remove("delicious-cake") + + // Add hooks so the provisioners run during the build + hooks := map[string][]packer.Hook{} + hooks[packer.HookProvision] = []packer.Hook{ + &packer.ProvisionHook{ + Provisioners: []packer.Provisioner{ + upload, + download, + }, + }, + } + hook := &packer.DispatchHook{Mapping: hooks} + + // Run things + artifact, err := builder.Run(ui, hook, cache) + if err != nil { + t.Fatalf("Error running build %s", err) + } + // Preemptive cleanup + defer artifact.Destroy() + + // Verify that the thing we downloaded is the same thing we sent up. 
+ inputFile, err := ioutil.ReadFile("test-fixtures/cake") + if err != nil { + t.Fatalf("Unable to read input file: %s", err) + } + outputFile, err := ioutil.ReadFile("delicious-cake") + if err != nil { + t.Fatalf("Unable to read output file: %s", err) + } + if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { + t.Fatalf("Input and output files do not match\nInput:\n%s\nOutput:\n%s\n", inputFile, outputFile) + } +} + +const dockerBuilderConfig = ` +{ + "builders": [ + { + "type": "docker", + "image": "alpine", + "export_path": "alpine.tar", + "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"] + } + ], + "provisioners": [ + { + "type": "file", + "source": "test-fixtures/cake", + "destination": "/chocolate-cake" + }, + { + "type": "file", + "source": "/chocolate-cake", + "destination": "delicious-cake", + "direction": "download" + } + ] +} +` From 8cdd07895217f11dc2b8ac34082fc7dc01ed733b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:48:47 -0700 Subject: [PATCH 097/100] Changed fixtures so we can do a directory test too --- builder/docker/test-fixtures/cake | 1 - builder/docker/test-fixtures/manycakes/chocolate | 1 + builder/docker/test-fixtures/manycakes/vanilla | 1 + builder/docker/test-fixtures/onecakes/strawberry | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) delete mode 100644 builder/docker/test-fixtures/cake create mode 100644 builder/docker/test-fixtures/manycakes/chocolate create mode 100644 builder/docker/test-fixtures/manycakes/vanilla create mode 100644 builder/docker/test-fixtures/onecakes/strawberry diff --git a/builder/docker/test-fixtures/cake b/builder/docker/test-fixtures/cake deleted file mode 100644 index 63d40b126..000000000 --- a/builder/docker/test-fixtures/cake +++ /dev/null @@ -1 +0,0 @@ -chocolate cake is delicious diff --git a/builder/docker/test-fixtures/manycakes/chocolate b/builder/docker/test-fixtures/manycakes/chocolate new file mode 100644 index 000000000..a2286c928 --- /dev/null +++ 
b/builder/docker/test-fixtures/manycakes/chocolate @@ -0,0 +1 @@ +chocolate! diff --git a/builder/docker/test-fixtures/manycakes/vanilla b/builder/docker/test-fixtures/manycakes/vanilla new file mode 100644 index 000000000..000a45578 --- /dev/null +++ b/builder/docker/test-fixtures/manycakes/vanilla @@ -0,0 +1 @@ +vanilla! diff --git a/builder/docker/test-fixtures/onecakes/strawberry b/builder/docker/test-fixtures/onecakes/strawberry new file mode 100644 index 000000000..b663de3a9 --- /dev/null +++ b/builder/docker/test-fixtures/onecakes/strawberry @@ -0,0 +1 @@ +strawberry! From 5ad4b0e97e186f5414045c1aa42b313fb3e6df65 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 12:16:26 -0700 Subject: [PATCH 098/100] Added tests and handle the tar format from docker cp - --- builder/docker/communicator.go | 12 +++++++++++- builder/docker/communicator_test.go | 22 +++++++++++++--------- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 8af54bdfe..fb88a4491 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -1,6 +1,7 @@ package docker import ( + "archive/tar" "bytes" "fmt" "io" @@ -210,7 +211,16 @@ func (c *Communicator) Download(src string, dst io.Writer) error { return fmt.Errorf("Failed to start download: %s", err) } - numBytes, err := io.Copy(dst, pipe) + // When you use - to send docker cp to stdout it is streamed as a tar; this + // enables it to work with directories. We don't actually support + // directories in Download() but we still need to handle the tar format. 
+ archive := tar.NewReader(pipe) + _, err = archive.Next() + if err != nil { + return fmt.Errorf("Failed to read header from tar stream: %s", err) + } + + numBytes, err := io.Copy(dst, archive) if err != nil { return fmt.Errorf("Failed to pipe download: %s", err) } diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index 221356723..db0bfcfe8 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -61,8 +61,10 @@ func TestUploadDownload(t *testing.T) { if err != nil { t.Fatalf("Error preparing download: %s", err) } - // Preemptive cleanup - defer os.Remove("delicious-cake") + // Preemptive cleanup. Honestly I don't know why you would want to get rid + // of my strawberry cake. It's so tasty! Do you not like cake? Are you a + // cake-hater? Or are you keeping all the cake all for yourself? So selfish! + defer os.Remove("my-strawberry-cake") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} @@ -85,16 +87,18 @@ func TestUploadDownload(t *testing.T) { defer artifact.Destroy() // Verify that the thing we downloaded is the same thing we sent up. - inputFile, err := ioutil.ReadFile("test-fixtures/cake") + // Complain loudly if it isn't. 
+ inputFile, err := ioutil.ReadFile("test-fixtures/onecakes/strawberry") if err != nil { t.Fatalf("Unable to read input file: %s", err) } - outputFile, err := ioutil.ReadFile("delicious-cake") + outputFile, err := ioutil.ReadFile("my-strawberry-cake") if err != nil { t.Fatalf("Unable to read output file: %s", err) } if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { - t.Fatalf("Input and output files do not match\nInput:\n%s\nOutput:\n%s\n", inputFile, outputFile) + t.Fatalf("Input and output files do not match\n"+ + "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile) } } @@ -111,13 +115,13 @@ const dockerBuilderConfig = ` "provisioners": [ { "type": "file", - "source": "test-fixtures/cake", - "destination": "/chocolate-cake" + "source": "test-fixtures/onecakes/strawberry", + "destination": "/strawberry-cake" }, { "type": "file", - "source": "/chocolate-cake", - "destination": "delicious-cake", + "source": "/strawberry-cake", + "destination": "my-strawberry-cake", "direction": "download" } ] From 9ee07f1e8dfae4a06bfaa176ceda61ef414c4a28 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 12:34:52 -0700 Subject: [PATCH 099/100] Add parallel gzip compression to the vagrant post-processor --- post-processor/vagrant/util.go | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/post-processor/vagrant/util.go b/post-processor/vagrant/util.go index a9e745fe6..6ae2c337f 100644 --- a/post-processor/vagrant/util.go +++ b/post-processor/vagrant/util.go @@ -3,14 +3,23 @@ package vagrant import ( "archive/tar" "compress/flate" - "compress/gzip" "encoding/json" "fmt" - "github.com/mitchellh/packer/packer" "io" "log" "os" "path/filepath" + "runtime" + + "github.com/klauspost/pgzip" + "github.com/mitchellh/packer/packer" +) + +var ( + // ErrInvalidCompressionLevel is returned when the compression level passed + // to gzip is not in the expected range. See compress/flate for details. 
+ ErrInvalidCompressionLevel = fmt.Errorf( + "Invalid compression level. Expected an integer from -1 to 9.") ) // Copies a file by copying the contents of the file to another place. @@ -60,10 +69,10 @@ func DirToBox(dst, dir string, ui packer.Ui, level int) error { } defer dstF.Close() - var dstWriter io.Writer = dstF + var dstWriter io.WriteCloser = dstF if level != flate.NoCompression { log.Printf("Compressing with gzip compression level: %d", level) - gzipWriter, err := gzip.NewWriterLevel(dstWriter, level) + gzipWriter, err := makePgzipWriter(dstWriter, level) if err != nil { return err } @@ -143,3 +152,12 @@ func WriteMetadata(dir string, contents interface{}) error { return nil } + +func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) { + gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel) + if err != nil { + return nil, ErrInvalidCompressionLevel + } + gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) + return gzipWriter, nil +} From 641c8a2ea0f972d2ade2aaac5c3289b3c119ea09 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 14:08:00 -0700 Subject: [PATCH 100/100] Added changelog entries for recently-merged features and fixes --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90b5bed8b..90529ef1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## (Unreleased) + +IMPROVEMENTS: + + * builder/docker: Now supports Download so it can be used with the file + provisioner to download a file from a container. [GH-2585] + * post-processor/vagrant: Like the compress post-processor, vagrant now uses a + parallel gzip algorithm to compress vagrant boxes. [GH-2590] + +BUG FIXES: + + * builder/parallels: Fix interpolation in parallels_tools_guest_path [GH-2543] + ## 0.8.5 (Aug 10, 2015) FEATURES: